diff --git a/build.sh b/build.sh
index 6cf21b7..de9c241 100644
--- a/build.sh
+++ b/build.sh
@@ -15,3 +15,5 @@ helm template superset https://github.com/apache/superset/releases/download/supe
docker buildx build --push -t registry.dev.k8s.transcity/reports/superset:5.0.0 superset-image
+
+helm template ch-operator https://github.com/Altinity/helm-charts/releases/download/release-0.25.6/altinity-clickhouse-operator-0.25.6.tgz --namespace reports-clickhouse --values clickhouse-operator-values.yaml > clickhouse/base/clickhouse-operator.yaml
diff --git a/clickhouse-operator-values-ref.yaml b/clickhouse-operator-values-ref.yaml
new file mode 100644
index 0000000..e0a13e1
--- /dev/null
+++ b/clickhouse-operator-values-ref.yaml
@@ -0,0 +1,931 @@
+namespaceOverride: ""
+# commonLabels -- set of labels that will be applied to all the resources for the operator
+commonLabels: {}
+# commonAnnotations -- set of annotations that will be applied to all the resources for the operator
+commonAnnotations: {}
+deployment:
+ # see `kubectl explain deployment.spec.strategy` for details
+ strategy:
+ type: Recreate
+crdHook:
+ # crdHook.enabled -- enable automatic CRD installation/update via pre-install/pre-upgrade hooks
+ # when disabled, CRDs must be installed manually using kubectl apply
+ enabled: true
+ image:
+ # crdHook.image.repository -- image repository for CRD installation job
+ repository: bitnami/kubectl
+ # crdHook.image.tag -- image tag for CRD installation job
+ tag: "latest"
+ # crdHook.image.pullPolicy -- image pull policy for CRD installation job
+ pullPolicy: IfNotPresent
+ # crdHook.resources -- resource limits and requests for CRD installation job
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+ # crdHook.nodeSelector -- node selector for CRD installation job
+ nodeSelector: {}
+ # crdHook.tolerations -- tolerations for CRD installation job
+ tolerations: []
+ # crdHook.affinity -- affinity for CRD installation job
+ affinity: {}
+operator:
+ image:
+ # operator.image.repository -- image repository
+ repository: altinity/clickhouse-operator
+ # operator.image.tag -- image tag (chart's appVersion value will be used if not set)
+ tag: ""
+ # operator.image.pullPolicy -- image pull policy
+ pullPolicy: IfNotPresent
+ containerSecurityContext: {}
+ # operator.resources -- custom resource configuration, check `kubectl explain pod.spec.containers.resources` for details
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # operator.priorityClassName -- priority class name for the clickhouse-operator deployment, check `kubectl explain pod.spec.priorityClassName` for details
+ # @default -- ""
+ priorityClassName: ""
+ # operator.env -- additional environment variables for the clickhouse-operator container in deployment
+ # possible value format `[{"name": "SAMPLE", "value": "text"}]`
+ env: []
+metrics:
+ enabled: true
+ image:
+ # metrics.image.repository -- image repository
+ repository: altinity/metrics-exporter
+ # metrics.image.tag -- image tag (chart's appVersion value will be used if not set)
+ tag: ""
+ # metrics.image.pullPolicy -- image pull policy
+ pullPolicy: IfNotPresent
+ containerSecurityContext: {}
+ # metrics.resources -- custom resource configuration
+ resources: {}
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+ # metrics.env -- additional environment variables for the deployment of metrics-exporter containers
+ # possible value format `[{"name": "SAMPLE", "value": "text"}]`
+ env: []
+# imagePullSecrets -- image pull secret for private images in clickhouse-operator pod
+# possible value format `[{"name":"your-secret-name"}]`,
+# check `kubectl explain pod.spec.imagePullSecrets` for details
+imagePullSecrets: []
+# podLabels -- labels to add to the clickhouse-operator pod
+podLabels: {}
+# podAnnotations -- annotations to add to the clickhouse-operator pod, check `kubectl explain pod.spec.annotations` for details
+# @default -- check the `values.yaml` file
+podAnnotations:
+ prometheus.io/port: '8888'
+ prometheus.io/scrape: 'true'
+ clickhouse-operator-metrics/port: '9999'
+ clickhouse-operator-metrics/scrape: 'true'
+# nameOverride -- override name of the chart
+nameOverride: ""
+# fullnameOverride -- full name of the chart.
+fullnameOverride: ""
+serviceAccount:
+ # serviceAccount.create -- specifies whether a service account should be created
+ create: true
+ # serviceAccount.annotations -- annotations to add to the service account
+ annotations: {}
+ # serviceAccount.name -- the name of the service account to use; if not set and create is true, a name is generated using the fullname template
+ name:
+rbac:
+ # rbac.create -- specifies whether rbac resources should be created
+ create: true
+ # rbac.namespaceScoped -- specifies whether to create roles and rolebindings at the cluster level or namespace level
+ namespaceScoped: false
+secret:
+ # secret.create -- create a secret with operator credentials
+ create: true
+ # secret.username -- operator credentials username
+ username: clickhouse_operator
+ # secret.password -- operator credentials password
+ password: clickhouse_operator_password
+# nodeSelector -- node for scheduler pod assignment, check `kubectl explain pod.spec.nodeSelector` for details
+nodeSelector: {}
+# tolerations -- tolerations for scheduler pod assignment, check `kubectl explain pod.spec.tolerations` for details
+tolerations: []
+# affinity -- affinity for scheduler pod assignment, check `kubectl explain pod.spec.affinity` for details
+affinity: {}
+# podSecurityContext - operator deployment SecurityContext, check `kubectl explain pod.spec.securityContext` for details
+podSecurityContext: {}
+# topologySpreadConstraints - topologySpreadConstraints affinity for scheduler pod assignment, check `kubectl explain pod.spec.topologySpreadConstraints` for details
+topologySpreadConstraints: []
+serviceMonitor:
+ # serviceMonitor.enabled -- ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator)
+ # The ServiceMonitor will contain two endpoints: ch-metrics on port 8888 and op-metrics on port 9999. You can specify interval, scrapeTimeout, relabelings, and metricRelabelings for each endpoint below
+ enabled: false
+ # serviceMonitor.additionalLabels -- additional labels for service monitor
+ additionalLabels: {}
+ clickhouseMetrics:
+ # serviceMonitor.interval for ch-metrics endpoint -- Prometheus ServiceMonitor scrape interval
+ interval: 30s
+ # serviceMonitor.scrapeTimeout for ch-metrics endpoint -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval, in which case the latter is used.
+ scrapeTimeout: ""
+ # serviceMonitor.relabelings for ch-metrics endpoint -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # serviceMonitor.metricRelabelings for ch-metrics endpoint -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+ operatorMetrics:
+ # serviceMonitor.interval for op-metrics endpoint -- Prometheus ServiceMonitor scrape interval
+ interval: 30s
+ # serviceMonitor.scrapeTimeout for op-metrics endpoint -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval, in which case the latter is used.
+ scrapeTimeout: ""
+ # serviceMonitor.relabelings for op-metrics endpoint -- Prometheus [RelabelConfigs] to apply to samples before scraping
+ relabelings: []
+ # serviceMonitor.metricRelabelings for op-metrics endpoint -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
+ metricRelabelings: []
+# configs -- clickhouse operator configs
+# @default -- check the `values.yaml` file for the config content (auto-generated from latest operator release)
+configs:
+ confdFiles: null
+ configdFiles:
+ 01-clickhouse-01-listen.xml: |
+
+
+
+
+
+
+
+
+ ::
+ 0.0.0.0
+ 1
+
+ 01-clickhouse-02-logger.xml: |
+
+
+
+
+
+
+
+
+
+ debug
+ /var/log/clickhouse-server/clickhouse-server.log
+ /var/log/clickhouse-server/clickhouse-server.err.log
+ 1000M
+ 10
+
+ 1
+
+
+ 01-clickhouse-03-query_log.xml: |
+
+
+
+
+
+
+
+
+ system
+
+ Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
+ 7500
+
+
+
+ 01-clickhouse-04-part_log.xml: |
+
+
+
+
+
+
+
+
+ system
+
+ Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
+ 7500
+
+
+ 01-clickhouse-05-trace_log.xml: |-
+
+
+
+
+
+
+
+
+ system
+
+ Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
+ 7500
+
+
+ files:
+ config.yaml:
+ # IMPORTANT
+ # This file is auto-generated
+ # Do not edit this file - all changes would be lost
+ # Edit appropriate template in the following folder:
+ # deploy/builder/templates-config
+ # IMPORTANT
+ #
+ # Template parameters available:
+ # WATCH_NAMESPACES=
+ # CH_USERNAME_PLAIN=
+ # CH_PASSWORD_PLAIN=
+ # CH_CREDENTIALS_SECRET_NAMESPACE=
+ # CH_CREDENTIALS_SECRET_NAME=clickhouse-operator
+ # VERBOSITY=1
+
+ ################################################
+ ##
+ ## Watch section
+ ##
+ ################################################
+ watch:
+ # List of namespaces where clickhouse-operator watches for events.
+ # Concurrently running operators should watch on different namespaces.
+ # IMPORTANT
+ # Regexp is applicable.
+ namespaces: []
+ clickhouse:
+ configuration:
+ ################################################
+ ##
+ ## Configuration files section
+ ##
+ ################################################
+ file:
+ # Each 'path' can be either absolute or relative.
+ # In case path is absolute - it is used as is
+ # In case path is relative - it is relative to the folder where the configuration file you are reading right now is located.
+ path:
+ # Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
+ common: chi/config.d
+ # Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
+ host: chi/conf.d
+ # Path to the folder where ClickHouse configuration files with users' settings are located.
+ # Files are common for all instances within a CHI.
+ user: chi/users.d
+ ################################################
+ ##
+ ## Configuration users section
+ ##
+ ################################################
+ user:
+ # Default settings for user accounts, created by the operator.
+ # IMPORTANT. These are not access credentials or settings for 'default' user account,
+ # it is a template for filling out missing fields for all user accounts to be created by the operator,
+ # with the following EXCEPTIONS:
+ # 1. 'default' user account DOES NOT use provided password, but uses all the rest of the fields.
+ # Password for 'default' user account has to be provided explicitly, if it is to be used.
+ # 2. CHOP user account DOES NOT use:
+ # - profile setting. It uses predefined profile called 'clickhouse_operator'
+ # - quota setting. It uses empty quota name.
+ # - networks IP setting. Operator specifies 'networks/ip' user setting to match the operator's pod IP only.
+ # - password setting. Password for CHOP account is used from 'clickhouse.access.*' section
+ default:
+ # Default values for ClickHouse user account(s) created by the operator
+ # 1. user/profile - string
+ # 2. user/quota - string
+ # 3. user/networks/ip - multiple strings
+ # 4. user/password - string
+ # These values can be overwritten on per-user basis.
+ profile: "default"
+ quota: "default"
+ networksIP:
+ - "::1"
+ - "127.0.0.1"
+ password: "default"
+ ################################################
+ ##
+ ## Configuration network section
+ ##
+ ################################################
+ network:
+ # Default host_regexp to limit network connectivity from outside
+ hostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
+ ################################################
+ ##
+ ## Configuration restart policy section
+ ## Configuration restart policy describes what configuration changes require ClickHouse restart
+ ##
+ ################################################
+ configurationRestartPolicy:
+ rules:
+ # IMPORTANT!
+ # Special version of "*" - default version - has to satisfy all ClickHouse versions.
+ # Default version will also be used in case ClickHouse version is unknown.
+ # ClickHouse version may be unknown due to host being down - for example, because of incorrect "settings" section.
+ # ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
+ - version: "*"
+ rules:
+ # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
+ # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
+ - settings/*: "yes"
+ # single values
+ - settings/access_control_path: "no"
+ - settings/dictionaries_config: "no"
+ - settings/max_server_memory_*: "no"
+ - settings/max_*_to_drop: "no"
+ - settings/max_concurrent_queries: "no"
+ - settings/models_config: "no"
+ - settings/user_defined_executable_functions_config: "no"
+ # structured XML
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+ # these settings should not lead to pod restarts
+ - settings/display_secrets_in_show_and_select: "no"
+ - zookeeper/*: "no"
+ - files/*.xml: "yes"
+ - files/config.d/*.xml: "yes"
+ - files/config.d/*dict*.xml: "no"
+ - files/config.d/*no_restart*: "no"
+ # exceptions in default profile
+ - profiles/default/background_*_pool_size: "yes"
+ - profiles/default/max_*_for_server: "yes"
+ - version: "21.*"
+ rules:
+ - settings/logger: "yes"
+ #################################################
+ ##
+ ## Access to ClickHouse instances
+ ##
+ ################################################
+ access:
+ # Possible values for 'scheme' are:
+ # 1. http - force http to be used to connect to ClickHouse instances
+ # 2. https - force https to be used to connect to ClickHouse instances
+ # 3. auto - either http or https is selected based on open ports
+ scheme: "auto"
+ # ClickHouse credentials (username, password and port) to be used by the operator to connect to ClickHouse instances.
+ # These credentials are used for:
+ # 1. Metrics requests
+ # 2. Schema maintenance
+ # User with these credentials can be specified in additional ClickHouse .xml config files,
+ # located in 'clickhouse.configuration.file.path.user' folder
+ username: ""
+ password: ""
+ rootCA: ""
+ # Location of the k8s Secret with username and password to be used by the operator to connect to ClickHouse instances.
+ # Can be used instead of explicitly specified username and password available in sections:
+ # - clickhouse.access.username
+ # - clickhouse.access.password
+ # Secret should have two keys:
+ # 1. username
+ # 2. password
+ secret:
+ # Empty `namespace` means that the k8s Secret will be looked up in the same namespace where the operator's pod is running.
+ namespace: ""
+ # Empty `name` means no k8s Secret would be looked for
+ name: '{{ include "altinity-clickhouse-operator.fullname" . }}'
+ # Port to connect to on ClickHouse instances
+ port: 8123
+ # Timeouts used to limit connection and queries from the operator to ClickHouse instances
+ # Specified in seconds.
+ timeouts:
+ # Timeout to set up a connection from the operator to ClickHouse instances. In seconds.
+ connect: 1
+ # Timeout to perform a SQL query from the operator to ClickHouse instances. In seconds.
+ query: 4
+ ################################################
+ ##
+ ## Addons specifies additional configuration sections
+ ## Should it be called something like "templates"?
+ ##
+ ################################################
+ addons:
+ rules:
+ - version: "*"
+ spec:
+ configuration:
+ users:
+ profiles:
+ quotas:
+ settings:
+ files:
+ - version: ">= 23.3"
+ spec:
+ configuration:
+ ###
+ ### users.d is global while its description depends on the CH version, which may vary on a per-host basis
+ ### In case of global-ness this may be better to implement via auto-templates
+ ###
+ ### As a solution, this may be applied on the whole cluster based on any of its hosts
+ ###
+ ### What to do when a host is just created? CH version is not known before CH has started, yet user config is required before CH starts.
+ ### We do not have any info about the cluster on initial creation
+ ###
+ users:
+ "{clickhouseOperatorUser}/access_management": 1
+ "{clickhouseOperatorUser}/named_collection_control": 1
+ "{clickhouseOperatorUser}/show_named_collections": 1
+ "{clickhouseOperatorUser}/show_named_collections_secrets": 1
+ profiles:
+ quotas:
+ settings:
+ files:
+ - version: ">= 23.5"
+ spec:
+ configuration:
+ users:
+ profiles:
+ clickhouse_operator/format_display_secrets_in_show_and_select: 1
+ quotas:
+ settings:
+ ##
+ ## this may be added on per-host basis into host's conf.d folder
+ ##
+ display_secrets_in_show_and_select: 1
+ files:
+ #################################################
+ ##
+ ## Metrics collection
+ ##
+ ################################################
+ metrics:
+ # Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
+ # Specified in seconds.
+ timeouts:
+ # Timeout used to limit metrics collection request. In seconds.
+ # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
+ # All collected metrics are returned.
+ collect: 9
+ keeper:
+ configuration:
+ ################################################
+ ##
+ ## Configuration files section
+ ##
+ ################################################
+ file:
+ # Each 'path' can be either absolute or relative.
+ # In case path is absolute - it is used as is
+ # In case path is relative - it is relative to the folder where the configuration file you are reading right now is located.
+ path:
+ # Path to the folder where Keeper configuration files common for all instances within a CHK are located.
+ common: chk/keeper_config.d
+ # Path to the folder where Keeper configuration files unique for each instance (host) within a CHK are located.
+ host: chk/conf.d
+ # Path to the folder where Keeper configuration files with users' settings are located.
+ # Files are common for all instances within a CHK.
+ user: chk/users.d
+ ################################################
+ ##
+ ## Template(s) management section
+ ##
+ ################################################
+ template:
+ chi:
+ # CHI template updates handling policy
+ # Possible policy values:
+ # - ReadOnStart. Accept CHIT updates on the operator's start only.
+ # - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply new CHITs on next regular reconcile of the CHI
+ policy: ApplyOnNextReconcile
+ # Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
+ # Templates are added to the list of all templates and used when CHI is reconciled.
+ # Templates are applied in sorted alpha-numeric order.
+ path: chi/templates.d
+ chk:
+ # CHK template updates handling policy
+ # Possible policy values:
+ # - ReadOnStart. Accept CHIT updates on the operator's start only.
+ # - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply new CHITs on next regular reconcile of the CHI
+ policy: ApplyOnNextReconcile
+ # Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
+ # Templates are added to the list of all templates and used when CHI is reconciled.
+ # Templates are applied in sorted alpha-numeric order.
+ path: chk/templates.d
+ ################################################
+ ##
+ ## Reconcile section
+ ##
+ ################################################
+ reconcile:
+ # Reconcile runtime settings
+ runtime:
+ # Max number of concurrent CHI reconciles in progress
+ reconcileCHIsThreadsNumber: 10
+ # The operator reconciles shards concurrently in each CHI with the following limitations:
+ # 1. Number of shards being reconciled (and thus having hosts down) in each CHI concurrently
+ # can not be greater than 'reconcileShardsThreadsNumber'.
+ # 2. Percentage of shards being reconciled (and thus having hosts down) in each CHI concurrently
+ # can not be greater than 'reconcileShardsMaxConcurrencyPercent'.
+ # 3. The first shard is always reconciled alone. Concurrency starts from the second shard and onward.
+ # Thus limiting number of shards being reconciled (and thus having hosts down) in each CHI by both number and percentage
+
+ # Max number of concurrent shard reconciles within one cluster in progress
+ reconcileShardsThreadsNumber: 5
+ # Max percentage of concurrent shard reconciles within one cluster in progress
+ reconcileShardsMaxConcurrencyPercent: 50
+ # Reconcile StatefulSet scenario
+ statefulSet:
+ # Create StatefulSet scenario
+ create:
+ # What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
+ # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ onFailure: ignore
+ # Update StatefulSet scenario
+ update:
+ # How many seconds to wait for created/updated StatefulSet to be 'Ready'
+ timeout: 300
+ # How many seconds to wait between checks/polls for created/updated StatefulSet status
+ pollInterval: 5
+ # What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
+ # Possible options:
+ # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
+ # do not try to fix or delete or update it, just abort reconcile cycle.
+ # Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
+ # 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
+ # Pod would be recreated by StatefulSet based on rollback-ed StatefulSet configuration.
+ # Follow 'abort' path afterwards.
+ # 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
+ onFailure: abort
+ # Reconcile Host scenario
+ host:
+ # The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
+ wait:
+ # Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ # - to be excluded from a ClickHouse cluster
+ # - to complete all running queries
+ # - to be included into a ClickHouse cluster
+ # respectively, before moving forward with host reconcile
+ exclude: true
+ queries: true
+ include: false
+ # The operator during reconcile procedure should wait for replicas to catch up on
+ # replication delay a.k.a. replication lag for the following replicas
+ replicas:
+ # All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
+ all: no
+ # New replicas only are requested to wait for replication to catch-up
+ new: yes
+ # Replication catch-up is considered to be completed as soon as replication delay
+ # a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
+ # is within this specified delay (in seconds)
+ delay: 10
+ probes:
+ # Whether the operator during host launch procedure should wait for startup probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is not to wait.
+ startup: no
+ # Whether the operator during host launch procedure should wait for readiness probe to succeed.
+ # In case probe is unspecified wait is assumed to be completed successfully.
+ # Default option value is to wait.
+ readiness: yes
+ # The operator during reconcile procedure should drop the following entities:
+ drop:
+ replicas:
+ # Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onDelete: yes
+ # Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ onLostVolume: yes
+ # Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ active: no
+ ################################################
+ ##
+ ## Annotations management section
+ ##
+ ################################################
+ annotation:
+ # Applied when:
+ # 1. Propagating annotations from the CHI's `metadata.annotations` to child objects' `metadata.annotations`,
+ # 2. Propagating annotations from the CHI Template's `metadata.annotations` to CHI's `metadata.annotations`,
+ # Include annotations from the following list:
+ # Applied only when not empty. Empty list means "include all, no selection"
+ include: []
+ # Exclude annotations from the following list:
+ exclude: []
+ ################################################
+ ##
+ ## Labels management section
+ ##
+ ################################################
+ label:
+ # Applied when:
+ # 1. Propagating labels from the CHI's `metadata.labels` to child objects' `metadata.labels`,
+ # 2. Propagating labels from the CHI Template's `metadata.labels` to CHI's `metadata.labels`,
+ # Include labels from the following list:
+ # Applied only when not empty. Empty list means "include all, no selection"
+ include: []
+ # Exclude labels from the following list:
+ # Applied only when not empty. Empty list means "nothing to exclude, no selection"
+ exclude: []
+ # Whether to append *Scope* labels to StatefulSet and Pod.
+ # For the full list of available *scope* labels, check 'labeler.go'
+ # LabelShardScopeIndex
+ # LabelReplicaScopeIndex
+ # LabelCHIScopeIndex
+ # LabelCHIScopeCycleSize
+ # LabelCHIScopeCycleIndex
+ # LabelCHIScopeCycleOffset
+ # LabelClusterScopeIndex
+ # LabelClusterScopeCycleSize
+ # LabelClusterScopeCycleIndex
+ # LabelClusterScopeCycleOffset
+ appendScope: "no"
+ ################################################
+ ##
+ ## Metrics management section
+ ##
+ ################################################
+ metrics:
+ labels:
+ exclude: []
+ ################################################
+ ##
+ ## Status management section
+ ##
+ ################################################
+ status:
+ fields:
+ action: false
+ actions: false
+ error: true
+ errors: true
+ ################################################
+ ##
+ ## StatefulSet management section
+ ##
+ ################################################
+ statefulSet:
+ revisionHistoryLimit: 0
+ ################################################
+ ##
+ ## Pod management section
+ ##
+ ################################################
+ pod:
+ # Grace period for Pod termination.
+ # How many seconds to wait between sending
+ # SIGTERM and SIGKILL during Pod termination process.
+ # Increase this number in case of slow shutdown.
+ terminationGracePeriod: 30
+ ################################################
+ ##
+ ## Log parameters section
+ ##
+ ################################################
+ logger:
+ logtostderr: "true"
+ alsologtostderr: "false"
+ v: "1"
+ stderrthreshold: ""
+ vmodule: ""
+ log_backtrace_at: ""
+ templatesdFiles:
+ 001-templates.json.example: |
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallationTemplate",
+ "metadata": {
+ "name": "01-default-volumeclaimtemplate"
+ },
+ "spec": {
+ "templates": {
+ "volumeClaimTemplates": [
+ {
+ "name": "chi-default-volume-claim-template",
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "2Gi"
+ }
+ }
+ }
+ }
+ ],
+ "podTemplates": [
+ {
+ "name": "chi-default-oneperhost-pod-template",
+ "distribution": "OnePerHost",
+ "spec": {
+ "containers" : [
+ {
+ "name": "clickhouse",
+ "image": "clickhouse/clickhouse-server:23.8",
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8123
+ },
+ {
+ "name": "client",
+ "containerPort": 9000
+ },
+ {
+ "name": "interserver",
+ "containerPort": 9009
+ }
+ ]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+ }
+ default-pod-template.yaml.example: |
+ apiVersion: "clickhouse.altinity.com/v1"
+ kind: "ClickHouseInstallationTemplate"
+ metadata:
+ name: "default-oneperhost-pod-template"
+ spec:
+ templates:
+ podTemplates:
+ - name: default-oneperhost-pod-template
+ distribution: "OnePerHost"
+ default-storage-template.yaml.example: |
+ apiVersion: "clickhouse.altinity.com/v1"
+ kind: "ClickHouseInstallationTemplate"
+ metadata:
+ name: "default-storage-template-2Gi"
+ spec:
+ templates:
+ volumeClaimTemplates:
+ - name: default-storage-template-2Gi
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2Gi
+ readme: |-
+ Templates in this folder are packaged with an operator and available via 'useTemplate'
+ usersdFiles:
+ 01-clickhouse-operator-profile.xml: |
+
+
+
+
+
+
+
+
+
+
+
+ 0
+ 1
+ 10
+ 0
+ 0
+
+
+
+ 02-clickhouse-default-profile.xml: |-
+
+
+
+
+
+
+
+
+
+ 2
+ 1
+ 1000
+ 1
+ 1
+ 1
+ nearest_hostname
+ 0
+
+
+
+
+ keeperConfdFiles: null
+ keeperConfigdFiles:
+ 01-keeper-01-default-config.xml: |
+
+
+
+
+
+
+
+
+
+ 10000
+ 10000
+ information
+ 100000
+
+ true
+ /var/lib/clickhouse-keeper/coordination/logs
+ /var/lib/clickhouse-keeper/coordination/snapshots
+ /var/lib/clickhouse-keeper
+ 2181
+
+ ::
+ 0.0.0.0
+ 1
+
+ 1
+ information
+
+ 4096
+
+ 01-keeper-02-readiness.xml: |
+
+
+
+
+
+
+
+
+
+ 9182
+
+ /ready
+
+
+
+
+ 01-keeper-03-enable-reconfig.xml: |-
+
+
+
+
+
+
+
+
+ false
+
+
+ keeperTemplatesdFiles:
+ readme: |-
+ Templates in this folder are packaged with an operator and available via 'useTemplate'
+ keeperUsersdFiles: null
+# additionalResources -- list of additional resources to create (processed via `tpl` function),
+ # useful for creating ClickHouse clusters together with the clickhouse-operator.
+# check `kubectl explain chi` for details
+additionalResources: []
+# - |
+# apiVersion: v1
+# kind: ConfigMap
+# metadata:
+# name: {{ include "altinity-clickhouse-operator.fullname" . }}-cm
+# namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
+# - |
+# apiVersion: v1
+# kind: Secret
+# metadata:
+# name: {{ include "altinity-clickhouse-operator.fullname" . }}-s
+# namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
+# stringData:
+# mykey: my-value
+# - |
+# apiVersion: clickhouse.altinity.com/v1
+# kind: ClickHouseInstallation
+# metadata:
+# name: {{ include "altinity-clickhouse-operator.fullname" . }}-chi
+# namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
+# spec:
+# configuration:
+# clusters:
+# - name: default
+# layout:
+# shardsCount: 1
+
+dashboards:
+ # dashboards.enabled -- provision grafana dashboards as configMaps (can be synced by grafana dashboards sidecar https://github.com/grafana/helm-charts/blob/grafana-8.3.4/charts/grafana/values.yaml#L778 )
+ enabled: false
+ # dashboards.additionalLabels -- labels to add to the configmaps with dashboards
+ additionalLabels:
+ # dashboards.additionalLabels.grafana_dashboard -- watched by the grafana dashboards sidecar when the official grafana helm chart sets sidecar.dashboards.enabled=true
+ grafana_dashboard: "1"
+ # dashboards.annotations -- annotations to add to the configmaps with dashboards
+ annotations:
+ # dashboards.annotations.grafana_folder -- folder where dashboards will be placed; requires setting sidecar.dashboards.folderAnnotation: grafana_folder in the official grafana helm chart
+ grafana_folder: clickhouse-operator
+
diff --git a/clickhouse-operator-values.yaml b/clickhouse-operator-values.yaml
new file mode 100644
index 0000000..e69de29
diff --git a/clickhouse/app/clickhouse.yaml b/clickhouse/app/clickhouse.yaml
new file mode 100644
index 0000000..1b87f2b
--- /dev/null
+++ b/clickhouse/app/clickhouse.yaml
@@ -0,0 +1,58 @@
+apiVersion: clickhouse.altinity.com/v1
+kind: ClickHouseInstallation
+metadata:
+ name: clickhouse
+spec:
+ configuration:
+ users:
+ # printf 'test_password' | sha256sum
+ #test_user/password_sha256_hex: 10a6e6cc8311a3e2bcc09bf6c199adecd5dd59408c343e926b129c4914f3cb01
+ admin/password: Transcity123
+ # to allow access outside from kubernetes
+ admin/networks/ip:
+ - 0.0.0.0/0
+ zookeeper:
+ nodes:
+ - host: chk-keeper-chk-0-0
+ port: 2181
+
+ clusters:
+ - name: "report"
+ layout:
+ shardsCount: 1
+ replicasCount: 1
+
+ defaults:
+ templates:
+ podTemplate: default
+
+ templates:
+ podTemplates:
+ - name: default
+ spec:
+ containers:
+ - name: clickhouse
+ image: clickhouse/clickhouse-server:24.8
+ volumeMounts:
+ - name: data-storage-vc-template
+ mountPath: /var/lib/clickhouse
+ - name: log-storage-vc-template
+ mountPath: /var/log/clickhouse-server
+
+ volumeClaimTemplates:
+ - name: data-storage-vc-template
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 30Gi
+ - name: log-storage-vc-template
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 20Gi
+
+
diff --git a/clickhouse/app/keeper.yaml b/clickhouse/app/keeper.yaml
new file mode 100644
index 0000000..51f420e
--- /dev/null
+++ b/clickhouse/app/keeper.yaml
@@ -0,0 +1,30 @@
+apiVersion: clickhouse-keeper.altinity.com/v1
+kind: ClickHouseKeeperInstallation
+metadata:
+ name: keeper
+spec:
+ defaults:
+ templates:
+ podTemplate: default
+ volumeClaimTemplate: default
+ templates:
+ podTemplates:
+ - name: default
+ spec:
+ containers:
+ - name: clickhouse-keeper
+ image: "clickhouse/clickhouse-keeper:24.3.5.46"
+ volumeClaimTemplates:
+ - name: default
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 10Gi
+ configuration:
+ clusters:
+ - name: chk
+ layout:
+ replicasCount: 1
+
diff --git a/clickhouse/app/kustomization.yaml b/clickhouse/app/kustomization.yaml
new file mode 100644
index 0000000..a86d4ee
--- /dev/null
+++ b/clickhouse/app/kustomization.yaml
@@ -0,0 +1,6 @@
+namespace: reports-clickhouse
+resources:
+- ../base
+- namespace.yaml
+- keeper.yaml
+- clickhouse.yaml
diff --git a/clickhouse/app/namespace.yaml b/clickhouse/app/namespace.yaml
new file mode 100644
index 0000000..01739bb
--- /dev/null
+++ b/clickhouse/app/namespace.yaml
@@ -0,0 +1,7 @@
+apiVersion: v1
+kind: Namespace
+metadata:
+ labels:
+ kubernetes.io/metadata.name: reports-clickhouse
+ name: reports-clickhouse
+
diff --git a/clickhouse/base/clickhouse-operator.yaml b/clickhouse/base/clickhouse-operator.yaml
new file mode 100644
index 0000000..8cc04b9
--- /dev/null
+++ b/clickhouse/base/clickhouse-operator.yaml
@@ -0,0 +1,5748 @@
+---
+# Source: altinity-clickhouse-operator/templates/generated/ServiceAccount-clickhouse-operator.yaml
+# Template Parameters:
+#
+# COMMENT=
+# NAMESPACE=kube-system
+# NAME=clickhouse-operator
+#
+# Setup ServiceAccount
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ch-operator-altinity-clickhouse-operator
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+
+# Template Parameters:
+#
+# NAMESPACE=kube-system
+# COMMENT=#
+# ROLE_KIND=ClusterRole
+# ROLE_NAME=clickhouse-operator-kube-system
+# ROLE_BINDING_KIND=ClusterRoleBinding
+# ROLE_BINDING_NAME=clickhouse-operator-kube-system
+#
+---
+# Source: altinity-clickhouse-operator/templates/generated/Secret-clickhouse-operator.yaml
+#
+# Template parameters available:
+# NAMESPACE=kube-system
+# COMMENT=
+# OPERATOR_VERSION=0.25.6
+# CH_USERNAME_SECRET_PLAIN=clickhouse_operator
+# CH_PASSWORD_SECRET_PLAIN=clickhouse_operator_password
+#
+apiVersion: v1
+kind: Secret
+metadata:
+ name: ch-operator-altinity-clickhouse-operator
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+type: Opaque
+data:
+ username: Y2xpY2tob3VzZV9vcGVyYXRvcg==
+ password: Y2xpY2tob3VzZV9vcGVyYXRvcl9wYXNzd29yZA==
+---
+# Source: altinity-clickhouse-operator/templates/generated/ConfigMap-etc-clickhouse-operator-confd-files.yaml
+# Template Parameters:
+#
+# NAME=etc-clickhouse-operator-confd-files
+# NAMESPACE=kube-system
+# COMMENT=
+#
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-confd-files
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+data:
+ null
+---
+# Source: altinity-clickhouse-operator/templates/generated/ConfigMap-etc-clickhouse-operator-configd-files.yaml
+# Template Parameters:
+#
+# NAME=etc-clickhouse-operator-configd-files
+# NAMESPACE=kube-system
+# COMMENT=
+#
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-configd-files
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+data:
+ 01-clickhouse-01-listen.xml: |
+
+
+
+
+
+
+
+
+ ::
+ 0.0.0.0
+ 1
+
+ 01-clickhouse-02-logger.xml: |
+
+
+
+
+
+
+
+
+
+ debug
+ /var/log/clickhouse-server/clickhouse-server.log
+ /var/log/clickhouse-server/clickhouse-server.err.log
+ 1000M
+ 10
+
+ 1
+
+
+ 01-clickhouse-03-query_log.xml: |
+
+
+
+
+
+
+
+
+ system
+
+ Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
+ 7500
+
+
+
+ 01-clickhouse-04-part_log.xml: |
+
+
+
+
+
+
+
+
+ system
+
+ Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
+ 7500
+
+
+ 01-clickhouse-05-trace_log.xml: |-
+
+
+
+
+
+
+
+
+ system
+
+ Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day
+ 7500
+
+
+---
+# Source: altinity-clickhouse-operator/templates/generated/ConfigMap-etc-clickhouse-operator-files.yaml
+# Template Parameters:
+#
+# NAME=etc-clickhouse-operator-files
+# NAMESPACE=kube-system
+# COMMENT=
+#
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-files
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+data:
+ config.yaml: |-
+ annotation:
+ exclude: []
+ include: []
+ clickhouse:
+ access:
+ password: ""
+ port: 8123
+ rootCA: ""
+ scheme: auto
+ secret:
+ name: 'ch-operator-altinity-clickhouse-operator'
+ namespace: ""
+ timeouts:
+ connect: 1
+ query: 4
+ username: ""
+ addons:
+ rules:
+ - spec:
+ configuration:
+ files: null
+ profiles: null
+ quotas: null
+ settings: null
+ users: null
+ version: '*'
+ - spec:
+ configuration:
+ files: null
+ profiles: null
+ quotas: null
+ settings: null
+ users:
+ '{clickhouseOperatorUser}/access_management': 1
+ '{clickhouseOperatorUser}/named_collection_control': 1
+ '{clickhouseOperatorUser}/show_named_collections': 1
+ '{clickhouseOperatorUser}/show_named_collections_secrets': 1
+ version: '>= 23.3'
+ - spec:
+ configuration:
+ files: null
+ profiles:
+ clickhouse_operator/format_display_secrets_in_show_and_select: 1
+ quotas: null
+ settings:
+ display_secrets_in_show_and_select: 1
+ users: null
+ version: '>= 23.5'
+ configuration:
+ file:
+ path:
+ common: chi/config.d
+ host: chi/conf.d
+ user: chi/users.d
+ network:
+ hostRegexpTemplate: (chi-{chi}-[^.]+\d+-\d+|clickhouse\-{chi})\.{namespace}\.svc\.cluster\.local$
+ user:
+ default:
+ networksIP:
+ - ::1
+ - 127.0.0.1
+ password: default
+ profile: default
+ quota: default
+ configurationRestartPolicy:
+ rules:
+ - rules:
+ - settings/*: "yes"
+ - settings/access_control_path: "no"
+ - settings/dictionaries_config: "no"
+ - settings/max_server_memory_*: "no"
+ - settings/max_*_to_drop: "no"
+ - settings/max_concurrent_queries: "no"
+ - settings/models_config: "no"
+ - settings/user_defined_executable_functions_config: "no"
+ - settings/logger/*: "no"
+ - settings/macros/*: "no"
+ - settings/remote_servers/*: "no"
+ - settings/user_directories/*: "no"
+ - settings/display_secrets_in_show_and_select: "no"
+ - zookeeper/*: "no"
+ - files/*.xml: "yes"
+ - files/config.d/*.xml: "yes"
+ - files/config.d/*dict*.xml: "no"
+ - files/config.d/*no_restart*: "no"
+ - profiles/default/background_*_pool_size: "yes"
+ - profiles/default/max_*_for_server: "yes"
+ version: '*'
+ - rules:
+ - settings/logger: "yes"
+ version: 21.*
+ metrics:
+ timeouts:
+ collect: 9
+ keeper:
+ configuration:
+ file:
+ path:
+ common: chk/keeper_config.d
+ host: chk/conf.d
+ user: chk/users.d
+ label:
+ appendScope: "no"
+ exclude: []
+ include: []
+ logger:
+ alsologtostderr: "false"
+ log_backtrace_at: ""
+ logtostderr: "true"
+ stderrthreshold: ""
+ v: "1"
+ vmodule: ""
+ metrics:
+ labels:
+ exclude: []
+ pod:
+ terminationGracePeriod: 30
+ reconcile:
+ host:
+ drop:
+ replicas:
+ active: false
+ onDelete: true
+ onLostVolume: true
+ wait:
+ exclude: true
+ include: false
+ probes:
+ readiness: true
+ startup: false
+ queries: true
+ replicas:
+ all: false
+ delay: 10
+ new: true
+ runtime:
+ reconcileCHIsThreadsNumber: 10
+ reconcileShardsMaxConcurrencyPercent: 50
+ reconcileShardsThreadsNumber: 5
+ statefulSet:
+ create:
+ onFailure: ignore
+ update:
+ onFailure: abort
+ pollInterval: 5
+ timeout: 300
+ statefulSet:
+ revisionHistoryLimit: 0
+ status:
+ fields:
+ action: false
+ actions: false
+ error: true
+ errors: true
+ template:
+ chi:
+ path: chi/templates.d
+ policy: ApplyOnNextReconcile
+ chk:
+ path: chk/templates.d
+ policy: ApplyOnNextReconcile
+ watch:
+ namespaces: []
+---
+# Source: altinity-clickhouse-operator/templates/generated/ConfigMap-etc-clickhouse-operator-templatesd-files.yaml
+# Template Parameters:
+#
+# NAME=etc-clickhouse-operator-templatesd-files
+# NAMESPACE=kube-system
+# COMMENT=
+#
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-templatesd-files
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+data:
+ 001-templates.json.example: |
+ {
+ "apiVersion": "clickhouse.altinity.com/v1",
+ "kind": "ClickHouseInstallationTemplate",
+ "metadata": {
+ "name": "01-default-volumeclaimtemplate"
+ },
+ "spec": {
+ "templates": {
+ "volumeClaimTemplates": [
+ {
+ "name": "chi-default-volume-claim-template",
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "2Gi"
+ }
+ }
+ }
+ }
+ ],
+ "podTemplates": [
+ {
+ "name": "chi-default-oneperhost-pod-template",
+ "distribution": "OnePerHost",
+ "spec": {
+ "containers" : [
+ {
+ "name": "clickhouse",
+ "image": "clickhouse/clickhouse-server:23.8",
+ "ports": [
+ {
+ "name": "http",
+ "containerPort": 8123
+ },
+ {
+ "name": "client",
+ "containerPort": 9000
+ },
+ {
+ "name": "interserver",
+ "containerPort": 9009
+ }
+ ]
+ }
+ ]
+ }
+ }
+ ]
+ }
+ }
+ }
+ default-pod-template.yaml.example: |
+ apiVersion: "clickhouse.altinity.com/v1"
+ kind: "ClickHouseInstallationTemplate"
+ metadata:
+ name: "default-oneperhost-pod-template"
+ spec:
+ templates:
+ podTemplates:
+ - name: default-oneperhost-pod-template
+ distribution: "OnePerHost"
+ default-storage-template.yaml.example: |
+ apiVersion: "clickhouse.altinity.com/v1"
+ kind: "ClickHouseInstallationTemplate"
+ metadata:
+ name: "default-storage-template-2Gi"
+ spec:
+ templates:
+ volumeClaimTemplates:
+ - name: default-storage-template-2Gi
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 2Gi
+ readme: Templates in this folder are packaged with an operator and available via 'useTemplate'
+---
+# Source: altinity-clickhouse-operator/templates/generated/ConfigMap-etc-clickhouse-operator-usersd-files.yaml
+# Template Parameters:
+#
+# NAME=etc-clickhouse-operator-usersd-files
+# NAMESPACE=kube-system
+# COMMENT=
+#
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-usersd-files
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+data:
+ 01-clickhouse-operator-profile.xml: |
+
+
+
+
+
+
+
+
+
+
+
+ 0
+ 1
+ 10
+ 0
+ 0
+
+
+
+ 02-clickhouse-default-profile.xml: |-
+
+
+
+
+
+
+
+
+
+ 2
+ 1
+ 1000
+ 1
+ 1
+ 1
+ nearest_hostname
+ 0
+
+
+
+
+---
+# Source: altinity-clickhouse-operator/templates/generated/ConfigMap-etc-keeper-operator-confd-files.yaml
+# Template Parameters:
+#
+# NAME=etc-keeper-operator-confd-files
+# NAMESPACE=kube-system
+# COMMENT=
+#
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-keeper-confd-files
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+data:
+ null
+---
+# Source: altinity-clickhouse-operator/templates/generated/ConfigMap-etc-keeper-operator-configd-files.yaml
+# Template Parameters:
+#
+# NAME=etc-keeper-operator-configd-files
+# NAMESPACE=kube-system
+# COMMENT=
+#
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-keeper-configd-files
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+data:
+ 01-keeper-01-default-config.xml: |
+
+
+
+
+
+
+
+
+
+ 10000
+ 10000
+ information
+ 100000
+
+ true
+ /var/lib/clickhouse-keeper/coordination/logs
+ /var/lib/clickhouse-keeper/coordination/snapshots
+ /var/lib/clickhouse-keeper
+ 2181
+
+ ::
+ 0.0.0.0
+ 1
+
+ 1
+ information
+
+ 4096
+
+ 01-keeper-02-readiness.xml: |
+
+
+
+
+
+
+
+
+
+ 9182
+
+ /ready
+
+
+
+
+ 01-keeper-03-enable-reconfig.xml: |-
+
+
+
+
+
+
+
+
+ false
+
+
+---
+# Source: altinity-clickhouse-operator/templates/generated/ConfigMap-etc-keeper-operator-templatesd-files.yaml
+# Template Parameters:
+#
+# NAME=etc-keeper-operator-templatesd-files
+# NAMESPACE=kube-system
+# COMMENT=
+#
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-keeper-templatesd-files
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+data:
+ readme: Templates in this folder are packaged with an operator and available via 'useTemplate'
+---
+# Source: altinity-clickhouse-operator/templates/generated/ConfigMap-etc-keeper-operator-usersd-files.yaml
+# Template Parameters:
+#
+# NAME=etc-keeper-operator-usersd-files
+# NAMESPACE=kube-system
+# COMMENT=
+#
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-keeper-usersd-files
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+data:
+ null
+---
+# Source: altinity-clickhouse-operator/templates/generated/ClusterRole-clickhouse-operator-kube-system.yaml
+# Specifies either
+# ClusterRole
+# or
+# Role
+# to be bound to ServiceAccount.
+# ClusterRole is namespace-less and must have unique name
+# Role is namespace-bound
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ch-operator-altinity-clickhouse-operator
+ #namespace: kube-system
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+rules:
+ #
+ # Core API group
+ #
+ - apiGroups:
+ - ""
+ resources:
+ - configmaps
+ - services
+ - persistentvolumeclaims
+ - secrets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - endpoints
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - events
+ verbs:
+ - create
+ - apiGroups:
+ - ""
+ resources:
+ - persistentvolumes
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - apiGroups:
+ - ""
+ resources:
+ - pods
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - delete
+ - apiGroups:
+ - ""
+ resources:
+ - secrets
+ verbs:
+ - get
+ - list
+ #
+ # apps.* resources
+ #
+ - apiGroups:
+ - apps
+ resources:
+ - statefulsets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ - apiGroups:
+ - apps
+ resources:
+ - replicasets
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ # The operator deployment itself, identified by name
+ - apiGroups:
+ - apps
+ resources:
+ - deployments
+ resourceNames:
+ - ch-operator-altinity-clickhouse-operator
+ verbs:
+ - get
+ - patch
+ - update
+ - delete
+ #
+ # policy.* resources
+ #
+ - apiGroups:
+ - policy
+ resources:
+ - poddisruptionbudgets
+ verbs:
+ - get
+ - list
+ - patch
+ - update
+ - watch
+ - create
+ - delete
+ #
+ # discovery.* resources
+ #
+ - apiGroups:
+ - discovery.k8s.io
+ resources:
+ - endpointslices
+ verbs:
+ - get
+ - list
+ - watch
+ #
+ # apiextensions
+ #
+ - apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ # clickhouse - related resources
+ - apiGroups:
+ - clickhouse.altinity.com
+ #
+ # The operator's specific Custom Resources
+ #
+
+ resources:
+ - clickhouseinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallationtemplates
+ - clickhouseoperatorconfigurations
+ verbs:
+ - get
+ - list
+ - watch
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/finalizers
+ - clickhouseinstallationtemplates/finalizers
+ - clickhouseoperatorconfigurations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse.altinity.com
+ resources:
+ - clickhouseinstallations/status
+ - clickhouseinstallationtemplates/status
+ - clickhouseoperatorconfigurations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
+ # clickhouse-keeper - related resources
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations
+ verbs:
+ - get
+ - list
+ - watch
+ - patch
+ - update
+ - delete
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/finalizers
+ verbs:
+ - update
+ - apiGroups:
+ - clickhouse-keeper.altinity.com
+ resources:
+ - clickhousekeeperinstallations/status
+ verbs:
+ - get
+ - update
+ - patch
+ - create
+ - delete
+---
+# Source: altinity-clickhouse-operator/templates/generated/ClusterRoleBinding-clickhouse-operator-kube-system.yaml
+# Specifies either
+# ClusterRoleBinding between ClusterRole and ServiceAccount.
+# or
+# RoleBinding between Role and ServiceAccount.
+# ClusterRoleBinding is namespace-less and must have unique name
+# RoleBinding is namespace-bound
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ch-operator-altinity-clickhouse-operator
+ #namespace: kube-system
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ch-operator-altinity-clickhouse-operator
+subjects:
+ - kind: ServiceAccount
+ name: ch-operator-altinity-clickhouse-operator
+ namespace: reports-clickhouse
+
+# Template Parameters:
+#
+# NAMESPACE=kube-system
+# COMMENT=
+# ROLE_KIND=Role
+# ROLE_NAME=clickhouse-operator
+# ROLE_BINDING_KIND=RoleBinding
+# ROLE_BINDING_NAME=clickhouse-operator
+#
+---
+# Source: altinity-clickhouse-operator/templates/generated/Service-clickhouse-operator-metrics.yaml
+# Template Parameters:
+#
+# NAMESPACE=kube-system
+# COMMENT=
+#
+# Setup ClusterIP Service to provide monitoring metrics for Prometheus
+# Service would be created in kubectl-specified namespace
+# In order to get access outside of k8s it should be exposed as:
+# kubectl --namespace prometheus port-forward service/prometheus 9090
+# and point browser to localhost:9090
+kind: Service
+apiVersion: v1
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-metrics
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+spec:
+ ports:
+
+ - port: 8888
+ name: ch-metrics
+
+ - port: 9999
+ name: op-metrics
+ selector:
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+---
+# Source: altinity-clickhouse-operator/templates/generated/Deployment-clickhouse-operator.yaml
+# Template Parameters:
+#
+# NAMESPACE=kube-system
+# COMMENT=
+# OPERATOR_IMAGE=altinity/clickhouse-operator:0.25.6
+# OPERATOR_IMAGE_PULL_POLICY=Always
+# METRICS_EXPORTER_IMAGE=altinity/metrics-exporter:0.25.6
+# METRICS_EXPORTER_IMAGE_PULL_POLICY=Always
+#
+# Setup Deployment for clickhouse-operator
+# Deployment would be created in kubectl-specified namespace
+kind: Deployment
+apiVersion: apps/v1
+metadata:
+ name: ch-operator-altinity-clickhouse-operator
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+ meta.helm.sh/release-name: ch-operator
+ meta.helm.sh/release-namespace: reports-clickhouse
+spec:
+ replicas: 1
+ selector:
+ matchLabels:
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ template:
+ metadata:
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ annotations:
+
+ clickhouse-operator-metrics/port: "9999"
+ clickhouse-operator-metrics/scrape: "true"
+ prometheus.io/port: "8888"
+ prometheus.io/scrape: "true"
+ checksum/files: 8836d3de71b66728daa2c86dcf7fe1a8d06274e6ff22c0fc0385bff353a16f36
+ checksum/confd-files: 3fbed492912d86bfeac1ec808f3c45465fc42fcb692883bc10d9cc18a1e64195
+ checksum/configd-files: 800a082707a0fe6ac0c5b28e8027eb2ac321d7645c0eb69978a1711dd3d55b63
+ checksum/templatesd-files: 4d5fa75c713c9930f73d8814ad358aad6d27d189b30cc975a5f46d4d3eed2357
+ checksum/usersd-files: bfec59cb6febd1609583e14f73a7b24171da0d9bd577d24ea6538b4d2950c881
+ checksum/keeper-confd-files: 31f096a0cc43dfd6e25a7fdf10977500e617ef7ad1ef1b02984f54a4627d8163
+ checksum/keeper-configd-files: 9dd53c84627e703bc6dc9da4cda06bc9f81240e79f26bd9d1928b778c2930407
+ checksum/keeper-templatesd-files: 032bcc017d0318ce1bb4686b91da961f1e9f31e7251789dcdde837ffe9fbe46f
+ checksum/keeper-usersd-files: 430f540abcb52f6a3a9850b62a49ba0fe17dfbcb9fe24be4ffa3bdaa19af675e
+ spec:
+ serviceAccountName: ch-operator-altinity-clickhouse-operator
+ volumes:
+ - name: etc-clickhouse-operator-folder
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-files
+ - name: etc-clickhouse-operator-confd-folder
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-confd-files
+ - name: etc-clickhouse-operator-configd-folder
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-configd-files
+ - name: etc-clickhouse-operator-templatesd-folder
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-templatesd-files
+ - name: etc-clickhouse-operator-usersd-folder
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-usersd-files
+ - name: etc-keeper-operator-confd-folder
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-keeper-confd-files
+ - name: etc-keeper-operator-configd-folder
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-keeper-configd-files
+ - name: etc-keeper-operator-templatesd-folder
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-keeper-templatesd-files
+ - name: etc-keeper-operator-usersd-folder
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-keeper-usersd-files
+ containers:
+ - name: altinity-clickhouse-operator
+ image: altinity/clickhouse-operator:0.25.6
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: etc-clickhouse-operator-folder
+ mountPath: /etc/clickhouse-operator
+ - name: etc-clickhouse-operator-confd-folder
+ mountPath: /etc/clickhouse-operator/chi/conf.d
+ - name: etc-clickhouse-operator-configd-folder
+ mountPath: /etc/clickhouse-operator/chi/config.d
+ - name: etc-clickhouse-operator-templatesd-folder
+ mountPath: /etc/clickhouse-operator/chi/templates.d
+ - name: etc-clickhouse-operator-usersd-folder
+ mountPath: /etc/clickhouse-operator/chi/users.d
+ - name: etc-keeper-operator-confd-folder
+ mountPath: /etc/clickhouse-operator/chk/conf.d
+ - name: etc-keeper-operator-configd-folder
+ mountPath: /etc/clickhouse-operator/chk/keeper_config.d
+ - name: etc-keeper-operator-templatesd-folder
+ mountPath: /etc/clickhouse-operator/chk/templates.d
+ - name: etc-keeper-operator-usersd-folder
+ mountPath: /etc/clickhouse-operator/chk/users.d
+ env:
+ # Pod-specific
+ # spec.nodeName: ip-172-20-52-62.ec2.internal
+ - name: OPERATOR_POD_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # metadata.name: clickhouse-operator-6f87589dbb-ftcsf
+ - name: OPERATOR_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ # metadata.namespace: kube-system
+ - name: OPERATOR_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ # status.podIP: 100.96.3.2
+ - name: OPERATOR_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ # spec.serviceAccount: clickhouse-operator
+ # spec.serviceAccountName: clickhouse-operator
+ - name: OPERATOR_POD_SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
+ # Container-specific
+ - name: OPERATOR_CONTAINER_CPU_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: altinity-clickhouse-operator
+ resource: requests.cpu
+ - name: OPERATOR_CONTAINER_CPU_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: altinity-clickhouse-operator
+ resource: limits.cpu
+ - name: OPERATOR_CONTAINER_MEM_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: altinity-clickhouse-operator
+ resource: requests.memory
+ - name: OPERATOR_CONTAINER_MEM_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: altinity-clickhouse-operator
+ resource: limits.memory
+
+ ports:
+ - containerPort: 9999
+ name: op-metrics
+ resources:
+ {}
+ securityContext:
+ {}
+
+ - name: metrics-exporter
+ image: altinity/metrics-exporter:0.25.6
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: etc-clickhouse-operator-folder
+ mountPath: /etc/clickhouse-operator
+ - name: etc-clickhouse-operator-confd-folder
+ mountPath: /etc/clickhouse-operator/chi/conf.d
+ - name: etc-clickhouse-operator-configd-folder
+ mountPath: /etc/clickhouse-operator/chi/config.d
+ - name: etc-clickhouse-operator-templatesd-folder
+ mountPath: /etc/clickhouse-operator/chi/templates.d
+ - name: etc-clickhouse-operator-usersd-folder
+ mountPath: /etc/clickhouse-operator/chi/users.d
+ - name: etc-keeper-operator-confd-folder
+ mountPath: /etc/clickhouse-operator/chk/conf.d
+ - name: etc-keeper-operator-configd-folder
+ mountPath: /etc/clickhouse-operator/chk/keeper_config.d
+ - name: etc-keeper-operator-templatesd-folder
+ mountPath: /etc/clickhouse-operator/chk/templates.d
+ - name: etc-keeper-operator-usersd-folder
+ mountPath: /etc/clickhouse-operator/chk/users.d
+ env:
+ # Pod-specific
+ # spec.nodeName: ip-172-20-52-62.ec2.internal
+ - name: OPERATOR_POD_NODE_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+ # metadata.name: clickhouse-operator-6f87589dbb-ftcsf
+ - name: OPERATOR_POD_NAME
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.name
+ # metadata.namespace: kube-system
+ - name: OPERATOR_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ # status.podIP: 100.96.3.2
+ - name: OPERATOR_POD_IP
+ valueFrom:
+ fieldRef:
+ fieldPath: status.podIP
+ # spec.serviceAccount: clickhouse-operator
+ # spec.serviceAccountName: clickhouse-operator
+ - name: OPERATOR_POD_SERVICE_ACCOUNT
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.serviceAccountName
+ # Container-specific
+ - name: OPERATOR_CONTAINER_CPU_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: altinity-clickhouse-operator
+ resource: requests.cpu
+ - name: OPERATOR_CONTAINER_CPU_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: altinity-clickhouse-operator
+ resource: limits.cpu
+ - name: OPERATOR_CONTAINER_MEM_REQUEST
+ valueFrom:
+ resourceFieldRef:
+ containerName: altinity-clickhouse-operator
+ resource: requests.memory
+ - name: OPERATOR_CONTAINER_MEM_LIMIT
+ valueFrom:
+ resourceFieldRef:
+ containerName: altinity-clickhouse-operator
+ resource: limits.memory
+
+ ports:
+ - containerPort: 8888
+ name: ch-metrics
+ resources:
+ {}
+ securityContext:
+ {}
+
+ imagePullSecrets:
+ []
+
+ nodeSelector:
+ {}
+ affinity:
+ {}
+ tolerations:
+ []
+ securityContext:
+ {}
+ topologySpreadConstraints:
+ []
+ strategy:
+ type: Recreate
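+
+# A minimal post-apply check for this Deployment (a sketch, assuming the
+# reports-clickhouse namespace used above):
+#   kubectl --namespace reports-clickhouse rollout status deployment/ch-operator-altinity-clickhouse-operator
+#   kubectl --namespace reports-clickhouse logs deployment/ch-operator-altinity-clickhouse-operator -c altinity-clickhouse-operator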
+---
+# Source: altinity-clickhouse-operator/templates/hooks/crd-install-rbac.yaml
+apiVersion: v1
+kind: ServiceAccount
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-crd-install
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: crd-install-hook
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-weight": "-6"
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+---
+# Source: altinity-clickhouse-operator/templates/hooks/crd-install-configmap.yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-crds
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: crd-install-hook
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-weight": "-7"
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+data:
+ clickhouseinstallations.yaml: |
+ # Template Parameters:
+ #
+ # KIND=ClickHouseInstallation
+ # SINGULAR=clickhouseinstallation
+ # PLURAL=clickhouseinstallations
+ # SHORT=chi
+ # OPERATOR_VERSION=0.25.6
+ #
+ apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ name: clickhouseinstallations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.25.6
+ spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallation
+ singular: clickhouseinstallation
+ plural: clickhouseinstallations
+ shortNames:
+ - chi
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+              description: "defines a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe the behavior of one or more ClickHouse clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+                    Status contains many fields, such as the normalized configuration, the clickhouse-operator version, the current action and the list of all applied actions, the current taskID and all applied taskIDs, and more
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ actionPlan:
+ type: object
+ description: "Action Plan"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+                        Allows defining a custom taskID for a CHI update and watching the status of this update's execution.
+                        Displayed in all .status.taskID* fields.
+                        By default (if not filled) every update of the CHI manifest generates a random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+                        Allows stopping all ClickHouse clusters defined in a CHI.
+                        Works as follows:
+                         - When `stop` is `1` the operator sets `Replicas: 0` in each StatefulSet. This leads to all `Pods` and `Services` being deleted. All PVCs are kept intact.
+                         - When `stop` is `0` the operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to the `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
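+                      # Example usage (a sketch, not part of the schema): pause a hypothetical
+                      # installation named "reports" while keeping its PVCs:
+                      #   kubectl --namespace reports-clickhouse patch chi reports --type merge -p '{"spec":{"stop":"yes"}}'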
+ restart:
+ type: string
+ description: |
+                        In case 'RollingUpdate' is specified, the operator will always restart ClickHouse pods during reconcile.
+                        This option is used in rare cases when a forced restart is required and is typically removed after use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+                        Suspends reconciliation of resources managed by a ClickHouse Installation.
+                        Works as follows:
+                         - When `suspend` is `true` the operator stops reconciling all resources.
+                         - When `suspend` is `false` or not set, the operator reconciles all resources.
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+                        Allows troubleshooting Pods stuck in a CrashLoopBackOff state.
+                        This may happen when a wrong configuration is applied; in this case `clickhouse-server` would not start.
+                        The command within the ClickHouse container is modified with `sleep` in order to avoid quick restarts
+                        and to give time to troubleshoot via the CLI.
+                        Liveness and readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+                        Optional, applicable inside a ClickHouseInstallationTemplate only.
+                        Defines how the current ClickHouseInstallationTemplate is applied to target ClickHouseInstallation(s).
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ reconciling: &TypeReconcile
+ type: object
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+                                Describes what clickhouse-operator should do with Kubernetes resources it finds which should be managed by clickhouse-operator,
+                                but do not have an `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+                                Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+                                Describes what clickhouse-operator should do with Kubernetes resources which failed during reconcile.
+                                Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+                            respectively, before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+                                      description: "maximum absolute replication delay below which a replica is considered not delayed"
+ probes:
+ type: object
+                                  description: "Which probes the operator should wait for during the host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+                                        Whether the operator during the host launch procedure should wait for the startup probe to succeed.
+                                        In case the probe is unspecified, the wait is assumed to be completed successfully.
+                                        The default is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+                                        Whether the operator during the host launch procedure should wait for the readiness probe to succeed.
+                                        In case the probe is unspecified, the wait is assumed to be completed successfully.
+                                        The default is to wait.
+ drop:
+ type: object
+ properties:
+ replicas:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated
+ properties:
+ onDelete:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onLostVolume:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ active:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ defaults:
+ type: object
+ description: |
+ define default behavior for whole ClickHouseInstallation, some behavior can be re-define on cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+                            defines whether replicas should be specified by FQDN in `<host></host>` elements.
+                            In case of "no", short hostnames are used and clickhouse-server relies on the default kubernetes DNS suffixes for lookup
+                            "no" by default
+ distributedDDL:
+ type: object
+ description: |
+                            allows changing `<distributed_ddl>` settings
+                            More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+                                defines behavior of `PVC` deletion.
+                                `Delete` by default; if `Retain` is specified then the `PVC` is kept when the StatefulSet is deleted
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+                          description: "optional, names of the templates that will be used to generate Kubernetes resources for one or more ClickHouse clusters described in the current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+                            allows configuring the `<zookeeper>` section in each `Pod` when generating the `ConfigMap` that is mounted in `/etc/clickhouse-server/config.d/`
+                            `clickhouse-operator` itself doesn't manage Zookeeper; please install Zookeeper separately, see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+                            currently, zookeeper (or its clickhouse-keeper replacement) is used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+                            More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ availabilityZone:
+ type: string
+ description: "availability zone for Zookeeper node"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
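+                          # Example `spec.configuration.zookeeper` value in a ClickHouseInstallation
+                          # (a sketch; the host name below is hypothetical):
+                          #   zookeeper:
+                          #     nodes:
+                          #       - host: zookeeper.reports-clickhouse.svc.cluster.local
+                          #         port: 2181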
+ users:
+ type: object
+ description: |
+                            allows configuring the `<users>` section in each `Pod` when generating the `ConfigMap` that is mounted in `/etc/clickhouse-server/users.d/`
+                            you can configure hashed passwords, authorization restrictions, database-level security row filters etc.
+                            More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+                            Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+
+                            any key may contain `valueFrom` with `secretKeyRef`, which allows passing a password from kubernetes secrets
+                            the secret value is passed in `pod.spec.containers.env` and rendered with from_env=XXX in the XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
+                            it is not updated automatically when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+
+                            look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+                            any key with the prefix `k8s_secret_` shall have a value in the format namespace/secret/key or secret/key
+                            in this case the value from the secret is written directly into the XML tag when rendering the *-usersd ConfigMap
+
+                            any key with the prefix `k8s_secret_env` shall have a value in the format namespace/secret/key or secret/key
+                            in this case the value from the secret is written into an environment variable and passed to the XML tag via from_env=XXX
+
+                            look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
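+                          # Example `spec.configuration.users` entry that pulls a password from a
+                          # Secret (a sketch; the user, secret and key names are hypothetical):
+                          #   users:
+                          #     reports/password:
+                          #       valueFrom:
+                          #         secretKeyRef:
+                          #           name: clickhouse-credentials
+                          #           key: reports-password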
+ profiles:
+ type: object
+ description: |
+                            allows configuring the `<profiles>` section in each `Pod` when generating the `ConfigMap` that is mounted in `/etc/clickhouse-server/users.d/`
+                            you can configure any aspect of a settings profile
+                            More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+                            Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+                            allows configuring the `<quotas>` section in each `Pod` when generating the `ConfigMap` that is mounted in `/etc/clickhouse-server/users.d/`
+                            you can configure any aspect of resource quotas
+                            More details: https://clickhouse.tech/docs/en/operations/quotas/
+                            Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+                            allows configuring `clickhouse-server` settings in each `Pod` when generating the `ConfigMap` that is mounted in `/etc/clickhouse-server/config.d/`
+                            More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+                            Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+
+                            any key may contain `valueFrom` with `secretKeyRef`, which allows passing a password from kubernetes secrets
+                            look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+                            the secret value is passed in `pod.spec.env` and rendered with from_env=XXX in the XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
+                            it is not updated automatically when the `secret` is updated; change spec.taskID to manually trigger a reconcile cycle
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+                            allows defining the content of any settings file inside each `Pod` when generating the `ConfigMap` that is mounted in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+                            every key in this object is the file name
+                            every value in this object is the file content
+                            you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+                            each key may contain a prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d; wrong prefixes are ignored and subfolders are ignored as well
+                            More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+
+                            any key may contain `valueFrom` with `secretKeyRef`, which allows passing values from kubernetes secrets
+                            secrets are mounted into the pod as a separate volume in /etc/clickhouse-server/secrets.d/
+                            and are updated automatically when the secret is updated
+                            this is useful for passing SSL certificates from cert-manager or a similar tool
+                            look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
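+                          # Example `spec.configuration.files` entry (a sketch; the file name and
+                          # content are hypothetical):
+                          #   files:
+                          #     config.d/log_rotation.xml: |
+                          #       <clickhouse>
+                          #         <logger><count>10</count></logger>
+                          #       </clickhouse>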
+ clusters:
+ type: array
+ description: |
+                            describes the clusters layout and allows changing settings on the cluster, shard and replica level
+                            every cluster is a set of StatefulSets; one StatefulSet contains only one Pod with `clickhouse-server`
+                            all Pods are rendered into the ClickHouse configs, mounted from a ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+                            Clusters are used for the Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+                            If a `cluster` contains zookeeper settings (possibly inherited from the top `chi` level), then you can create *ReplicatedMergeTree tables
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+                                description: "cluster name, used to identify a set of servers and widely used when generating names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+                                  optional, allows configuring the `<zookeeper>` section in each `Pod` only in the current ClickHouse cluster, when generating the `ConfigMap` that is mounted in `/etc/clickhouse-server/config.d/`
+                                  overrides top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
+ layout:
+ type: object
+ description: |
+                                  describes the current cluster layout: how many shards in the cluster and how many replicas in a shard
+                                  allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ shardsCount:
+ type: integer
+ description: |
+                                      how many shards of the current ClickHouse cluster will run in Kubernetes,
+                                      each shard contains a shared-nothing part of the data and a set of replicas,
+                                      a cluster contains 1 shard by default
+ replicasCount:
+ type: integer
+ description: |
+                                      how many replicas in each shard of the current cluster will run in Kubernetes,
+                                      each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+                                      every shard contains 1 replica by default
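+                                  # Example layout (a sketch): shardsCount: 2 with replicasCount: 2
+                                  # renders 4 clickhouse-server Pods, one StatefulSet per host:
+                                  #   layout:
+                                  #     shardsCount: 2
+                                  #     replicasCount: 2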
+ shards:
+ type: array
+ description: |
+                                      optional, allows overriding top-level `chi.spec.configuration` and cluster-level
+                                      `chi.spec.configuration.clusters` settings for each shard separately,
+                                      use it only if you fully understand what you do
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+                                            optional, 1 by default, allows setting up the shard weight used during inserts into tables with the `Distributed` engine,
+                                            applied inside the ConfigMap that is mounted in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+                                            More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+                                            optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and `false` otherwise
+                                            allows setting up whether inserts into tables with the `Distributed` engine go to only one live replica, with the other replicas downloading the inserted data during replication,
+                                            applied inside the ConfigMap that is mounted in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+                                            More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+                                            optional, how many replicas of the selected shard of the selected ClickHouse cluster will run in Kubernetes; each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+                                            a shard contains 1 replica by default
+                                            overrides cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+                                    description: "optional, allows overriding top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard related to the selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+                      description: "allows defining templates which are used to render Kubernetes resources like StatefulSet, ConfigMap, Service and PVC; by default clickhouse-operator has its own templates, but you can override them"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+                          description: "hostTemplate is used during apply to generate `clickhouse-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+                                description: "template name, can be referenced from top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.templates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+                                description: "defines how the numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs are distributed"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+                                      description: "type of distribution; when `Unspecified` (default value) all listen ports in the clickhouse-server configuration in all Pods have the same value; when `ClusterScopeIndex` ports are incremented by an offset from the base value depending on the shard and replica index inside the cluster; in combination with `chi.spec.templates.podTemplates.spec.HostNetwork` this allows setting up a ClickHouse cluster inside Kubernetes and providing access via an external network, bypassing the Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+                                    description: "by default the hostname is generated, but this allows defining a custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ use for inter-pod affinity look to `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity"
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
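+ # Illustrative sketch (not part of the generated schema): a podTemplate that spreads
+ # ClickHouse Pods across nodes via `podDistribution` and pins the server image;
+ # the template name and image tag are hypothetical.
+ #
+ #   spec:
+ #     templates:
+ #       podTemplates:
+ #         - name: clickhouse-per-host
+ #           podDistribution:
+ #             - type: ClickHouseAntiAffinity
+ #               scope: Cluster
+ #           spec:
+ #             containers:
+ #               - name: clickhouse
+ #                 image: clickhouse/clickhouse-server:24.8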
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows define template for rendering `PVC` kubernetes resource, which would use inside `Pod` for mount clickhouse `data`, clickhouse `logs` or something else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
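+ # Illustrative sketch (not part of the generated schema): a volumeClaimTemplate for the
+ # data directory that survives StatefulSet deletion; the name and size are hypothetical.
+ #
+ #   spec:
+ #     templates:
+ #       volumeClaimTemplates:
+ #         - name: data-volume
+ #           reclaimPolicy: Retain
+ #           spec:
+ #             accessModes:
+ #               - ReadWriteOnce
+ #             resources:
+ #               requests:
+ #                 storage: 100Gi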
+ serviceTemplates:
+ type: array
+ description: |
+ allows defining a template for rendering a `Service` which gets its endpoints from Pods scoped at the chi-wide, cluster-wide, shard-wide or replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, could use to link inside
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Could be used to define Cloud Provider specific metadata which impacts the behavior of the Service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
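+ # Illustrative sketch (not part of the generated schema): a serviceTemplate exposing the
+ # installation through a LoadBalancer; the name and generateName pattern are hypothetical.
+ #
+ #   spec:
+ #     templates:
+ #       serviceTemplates:
+ #         - name: chi-service-lb
+ #           generateName: "clickhouse-{chi}"
+ #           spec:
+ #             type: LoadBalancer
+ #             ports:
+ #               - name: http
+ #                 port: 8123
+ #               - name: tcp
+ #                 port: 9000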
+ useTemplates:
+ type: array
+ description: |
+ list of `ClickHouseInstallationTemplate` (chit) resource names which will be merged with the current `CHI`
+ manifest when rendering the Kubernetes resources that create the related ClickHouse clusters
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
+
+ clickhouseinstallationtemplates.yaml: |
+ # Template Parameters:
+ #
+ # KIND=ClickHouseInstallationTemplate
+ # SINGULAR=clickhouseinstallationtemplate
+ # PLURAL=clickhouseinstallationtemplates
+ # SHORT=chit
+ # OPERATOR_VERSION=0.25.6
+ #
+ apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ name: clickhouseinstallationtemplates.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.25.6
+ spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseInstallationTemplate
+ singular: clickhouseinstallationtemplate
+ plural: clickhouseinstallationtemplates
+ shortNames:
+ - chit
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains many fields like a normalized configuration, clickhouse-operator version, current action and all applied action list, current taskID and all applied taskIDs and other
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ actionPlan:
+ type: object
+ description: "Action Plan"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+ - When `stop` is `1` the operator sets `Replicas: 0` in each StatefulSet. This leads to having all `Pods` and `Service` deleted. All PVCs are kept intact.
+ - When `stop` is `0` the operator sets `Replicas: 1` and `Pod`s and `Service`s will be created again and all retained PVCs will be attached to `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ restart:
+ type: string
+ description: |
+ In case 'RollingUpdate' specified, the operator will always restart ClickHouse pods during reconcile.
+ This option is used in rare cases when a forced restart is required and is typically removed after use in order to avoid unneeded restarts.
+ enum:
+ - ""
+ - "RollingUpdate"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Installation.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
+ troubleshoot:
+ !!merge <<: *TypeStringBool
+ description: |
+ Allows to troubleshoot Pods during CrashLoopBack state.
+ This may happen when a wrong configuration is applied, in which case `clickhouse-server` wouldn't start.
+ Command within ClickHouse container is modified with `sleep` in order to avoid quick restarts
+ and give time to troubleshoot via CLI.
+ Liveness and Readiness probes are disabled as well.
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ templating:
+ type: object
+ # nullable: true
+ description: |
+ Optional, applicable inside ClickHouseInstallationTemplate only.
+ Defines how the current ClickHouseInstallationTemplate is applied to target ClickHouseInstallation(s).
+ properties:
+ policy:
+ type: string
+ description: |
+ When defined as `auto` inside ClickhouseInstallationTemplate, this ClickhouseInstallationTemplate
+ will be auto-added into ClickHouseInstallation, selectable by `chiSelector`.
+ Default value is `manual`, meaning ClickHouseInstallation should request this ClickhouseInstallationTemplate explicitly.
+ enum:
+ - ""
+ - "auto"
+ - "manual"
+ chiSelector:
+ type: object
+ description: "Optional, defines selector for ClickHouseInstallation(s) to be templated with ClickhouseInstallationTemplate"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
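+ # Illustrative sketch (not part of the generated schema, and assuming `chiSelector`
+ # takes a plain label map): a chit applied automatically to every CHI matching the
+ # selector; the label is hypothetical.
+ #
+ #   kind: ClickHouseInstallationTemplate
+ #   spec:
+ #     templating:
+ #       policy: auto
+ #       chiSelector:
+ #         environment: production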
+ reconciling: &TypeReconcile
+ type: object
+ description: "[OBSOLETED] Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources it finds which should be managed by clickhouse-operator,
+ but do not have an `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which failed during reconcile.
+ Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ macros:
+ type: object
+ description: "macros parameters"
+ properties:
+ sections:
+ type: object
+ description: "sections behaviour for macros"
+ properties:
+ users:
+ type: object
+ description: "sections behaviour for macros on users"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ profiles:
+ type: object
+ description: "sections behaviour for macros on profiles"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ quotas:
+ type: object
+ description: "sections behaviour for macros on quotas"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ settings:
+ type: object
+ description: "sections behaviour for macros on settings"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ files:
+ type: object
+ description: "sections behaviour for macros on files"
+ properties:
+ enabled:
+ !!merge <<: *TypeStringBool
+ description: "enabled or not"
+ runtime: &TypeReconcileRuntime
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "The maximum number of cluster shards that may be reconciled in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ host: &TypeReconcileHost
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively, before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude:
+ !!merge <<: *TypeStringBool
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for startup probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ The default is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during host launch procedure should wait for ready probe to succeed.
+ In case probe is unspecified wait is assumed to be completed successfully.
+ Default option value is to wait.
+ drop:
+ type: object
+ properties:
+ replicas:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated
+ properties:
+ onDelete:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onLostVolume:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ active:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ reconcile:
+ !!merge <<: *TypeReconcile
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ defaults:
+ type: object
+ description: |
+ defines default behavior for the whole ClickHouseInstallation; some of it can be re-defined at the cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ defines whether replicas should be specified by FQDN in ``.
+ In case of "no" the short hostname is used and clickhouse-server uses the default Kubernetes suffixes for DNS lookup
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows changing `` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ zookeeper: &TypeZookeeperConfig
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ `clickhouse-operator` itself doesn't manage Zookeeper, please install Zookeeper separately, see examples at https://github.com/Altinity/clickhouse-operator/tree/master/deploy/zookeeper/
+ currently, zookeeper (or its clickhouse-keeper replacement) is used for *ReplicatedMergeTree table engines and for `distributed_ddl`
+ More details: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings_zookeeper
+ # nullable: true
+ properties:
+ nodes:
+ type: array
+ description: "describe every available zookeeper cluster node for interaction"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - host
+ properties:
+ host:
+ type: string
+ description: "dns name or ip address for Zookeeper node"
+ port:
+ type: integer
+ description: "TCP port which used to connect to Zookeeper node"
+ minimum: 0
+ maximum: 65535
+ secure:
+ !!merge <<: *TypeStringBool
+ description: "if a secure connection to Zookeeper is required"
+ availabilityZone:
+ type: string
+ description: "availability zone for Zookeeper node"
+ session_timeout_ms:
+ type: integer
+ description: "session timeout during connect to Zookeeper"
+ operation_timeout_ms:
+ type: integer
+ description: "one operation timeout during Zookeeper transactions"
+ root:
+ type: string
+ description: "optional root znode path inside zookeeper to store ClickHouse related data (replication queue or distributed DDL)"
+ identity:
+ type: string
+ description: "optional access credentials string with `user:password` format used when use digest authorization in Zookeeper"
+ use_compression:
+ !!merge <<: *TypeStringBool
+ description: "Enables compression in Keeper protocol if set to true"
+ users:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure password hashes, authorization restrictions, database-level security row filters etc.
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-users/
+ Your yaml code will be converted to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationusers
+
+ any key may contain `valueFrom` with `secretKeyRef`, which allows passing a password from kubernetes secrets
+ the secret value is passed via `pod.spec.containers.env` and rendered with from_env=XXX in the XML in /etc/clickhouse-server/users.d/chop-generated-users.xml
+ it is not updated automatically when the `secret` changes; change spec.taskID to manually trigger a reconcile cycle
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ any key with prefix `k8s_secret_` must have a value in the format namespace/secret/key or secret/key
+ in this case the value from the secret is written directly into the XML tag when rendering the *-usersd ConfigMap
+
+ any key with prefix `k8s_secret_env` must have a value in the format namespace/secret/key or secret/key
+ in this case the value from the secret is written into an environment variable and referenced in the XML tag via from_env=XXX
+
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
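+ # Illustrative sketch (not part of the generated schema): a user whose password comes
+ # from a Kubernetes secret via `valueFrom`/`secretKeyRef`; the user, secret and key
+ # names are hypothetical.
+ #
+ #   spec:
+ #     configuration:
+ #       users:
+ #         reports/networks/ip:
+ #           - "::/0"
+ #         reports/password:
+ #           valueFrom:
+ #             secretKeyRef:
+ #               name: clickhouse-credentials
+ #               key: reports-password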
+ profiles:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of settings profile
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings-profiles/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationprofiles
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: |
+ allows configure .. section in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/users.d/`
+ you can configure any aspect of resource quotas
+ More details: https://clickhouse.tech/docs/en/operations/quotas/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationquotas
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configure `clickhouse-server` settings inside ... tag in each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ Your yaml code will convert to XML, see examples https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specconfigurationsettings
+
+ any key may contain `valueFrom` with `secretKeyRef`, which allows passing a password from kubernetes secrets
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+
+ the secret value is passed via `pod.spec.env` and rendered with from_env=XXX in the XML in /etc/clickhouse-server/config.d/chop-generated-settings.xml
+ it is not updated automatically when the `secret` changes; change spec.taskID to manually trigger a reconcile cycle
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows define content of any setting file inside each `Pod` during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ every key in this object is the file name
+ every value in this object is the file content
+ you can use `!!binary |` and base64 for binary files, see details here https://yaml.org/type/binary.html
+ each key may contain a prefix like {common}, {users}, {hosts} or config.d, users.d, conf.d; wrong prefixes are ignored, and so are subfolders
+ More details: https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-05-files-nested.yaml
+
+ any key may contain `valueFrom` with `secretKeyRef`, which allows passing values from kubernetes secrets
+ secrets are mounted into the pod as a separate volume under /etc/clickhouse-server/secrets.d/
+ and are updated automatically when the secret is updated
+ this is useful for passing SSL certificates from cert-manager or a similar tool
+ look into https://github.com/Altinity/clickhouse-operator/blob/master/docs/chi-examples/05-settings-01-overview.yaml for examples
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
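+ # Illustrative sketch (not part of the generated schema): dropping an extra server config
+ # file into config.d via `files`; the file name and content are hypothetical.
+ #
+ #   spec:
+ #     configuration:
+ #       files:
+ #         config.d/log_rotation.xml: |
+ #           <clickhouse>
+ #             <logger>
+ #               <count>10</count>
+ #             </logger>
+ #           </clickhouse>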
+ clusters:
+ type: array
+ description: |
+ describes the clusters layout and allows changing settings at the cluster, shard and replica level
+ every cluster is a set of StatefulSets; each StatefulSet contains exactly one Pod with `clickhouse-server`
+ all Pods are rendered into the ClickHouse configs, mounted from a ConfigMap as `/etc/clickhouse-server/config.d/chop-generated-remote_servers.xml`
+ Clusters are used by the Distributed table engine, more details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ If `cluster` contains zookeeper settings (possibly inherited from the top `chi` level), then you can create *ReplicatedMergeTree tables
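+ # Illustrative sketch (not part of the generated schema): a two-shard, two-replica
+ # cluster defined via `layout`; the cluster name is hypothetical.
+ #
+ #   spec:
+ #     configuration:
+ #       clusters:
+ #         - name: reports
+ #           layout:
+ #             shardsCount: 2
+ #             replicasCount: 2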
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zookeeper:
+ !!merge <<: *TypeZookeeperConfig
+ description: |
+ optional, allows configure .. section in each `Pod` only in current ClickHouse cluster, during generate `ConfigMap` which will mounted in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.zookeeper` settings
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ schemaPolicy:
+ type: object
+ description: |
+ describes how schema is propagated within replicas and shards
+ properties:
+ replica:
+ type: string
+ description: "how schema is propagated within a replica"
+ enum:
+ # List SchemaPolicyReplicaXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ shard:
+ type: string
+ description: "how schema is propagated between shards"
+ enum:
+ # List SchemaPolicyShardXXX constants from model
+ - ""
+ - "None"
+ - "All"
+ - "DistributedTablesOnly"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: optional, open secure ports for cluster
+ secret:
+ type: object
+ description: "optional, shared secret value to secure cluster communications"
+ properties:
+ auto:
+ !!merge <<: *TypeStringBool
+ description: "Auto-generate shared secret value to secure cluster communications"
+ value:
+ description: "Cluster shared secret value in plain text"
+ type: string
+ valueFrom:
+ description: "Cluster shared secret source"
+ type: object
+ properties:
+ secretKeyRef:
+ description: |
+ Selects a key of a secret in the clickhouse installation namespace.
+ Should not be used if value is not empty.
+ type: object
+ properties:
+ name:
+ description: |
+ Name of the referent. More info:
+ https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names
+ type: string
+ key:
+ description: The key of the secret to select from. Must be a valid secret key.
+ type: string
+ optional:
+ description: Specify whether the Secret or its key must be defined
+ type: boolean
+ required:
+ - name
+ - key
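+ # Illustrative sketch (not part of the generated schema): securing inter-node
+ # communication of a cluster with an auto-generated shared secret; the cluster
+ # name is hypothetical.
+ #
+ #   spec:
+ #     configuration:
+ #       clusters:
+ #         - name: reports
+ #           secret:
+ #             auto: "true"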
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ !!merge <<: *TypeReconcileRuntime
+ host:
+ !!merge <<: *TypeReconcileHost
+ layout:
+ type: object
+ description: |
+ describes the current cluster layout: how many shards are in the cluster and how many replicas are in each shard
+ allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ shardsCount:
+ type: integer
+ description: |
+ how many shards of the current ClickHouse cluster will run in Kubernetes,
+ each shard contains a shared-nothing part of the data and a set of replicas,
+ a cluster contains 1 shard by default
+ replicasCount:
+ type: integer
+ description: |
+ how many replicas in each shard of the current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+ every shard contains 1 replica by default
+ shards:
+ type: array
+ description: |
+ optional, allows overriding top-level `chi.spec.configuration` and cluster-level
+ `chi.spec.configuration.clusters` settings for each shard separately,
+ use it only if you fully understand what you are doing
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ definitionType:
+ type: string
+ description: "DEPRECATED - to be removed soon"
+ weight:
+ type: integer
+ description: |
+ optional, 1 by default, allows setup shard setting which will use during insert into tables with `Distributed` engine,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ internalReplication:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, `true` by default when `chi.spec.configuration.clusters[].layout.ReplicaCount` > 1 and 0 otherwise
+ allows setup setting which will use during insert into tables with `Distributed` engine for insert only in one live replica and other replicas will download inserted data during replication,
+ will apply in inside ConfigMap which will mount in /etc/clickhouse-server/config.d/chop-generated-remote_servers.xml
+ More details: https://clickhouse.tech/docs/en/engines/table-engines/special/distributed/
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings` and cluster-level `chi.spec.configuration.clusters.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected shard
+ override top-level `chi.spec.configuration.templates` and cluster-level `chi.spec.configuration.clusters.templates`
+ replicasCount:
+ type: integer
+ description: |
+ optional, how many replicas of the selected shard for the selected ClickHouse cluster will run in Kubernetes, each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+ shard contains 1 replica by default
+ override cluster-level `chi.spec.configuration.clusters.layout.replicasCount`
+ minimum: 1
+ replicas:
+ type: array
+ description: |
+ optional, allows override behavior for selected replicas from cluster-level `chi.spec.configuration.clusters` and shard-level `chi.spec.configuration.clusters.layout.shards`
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected replica, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected replica, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected replica, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and shard-level `chi.spec.configuration.clusters.layout.shards.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`, cluster-level `chi.spec.configuration.clusters.files` and shard-level `chi.spec.configuration.clusters.layout.shards.files`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates` and shard-level `chi.spec.configuration.clusters.layout.shards.templates`
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `tcp` for selected shard, override `chi.spec.templates.hostTemplates.spec.tcpPort`
+ allows connect to `clickhouse-server` via TCP Native protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `http` for selected shard, override `chi.spec.templates.hostTemplates.spec.httpPort`
+ allows connect to `clickhouse-server` via HTTP protocol via kubernetes `Service`
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `Pod.spec.containers.ports` with name `interserver` for selected shard, override `chi.spec.templates.hostTemplates.spec.interserverHTTPPort`
+ allows connect between replicas inside same shard during fetch replicated data parts HTTP protocol
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configure `clickhouse-server` settings inside ... tag in `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will use during apply to generate `clickhose-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ insecure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open insecure ports for cluster, defaults to "yes"
+ secure:
+ !!merge <<: *TypeStringBool
+ description: |
+ optional, open secure ports
+ tcpPort:
+ type: integer
+ description: |
+ optional, setup `tcp_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=tcp]`
+ More info: https://clickhouse.tech/docs/en/interfaces/tcp/
+ minimum: 1
+ maximum: 65535
+ tlsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ httpPort:
+ type: integer
+ description: |
+ optional, setup `http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=http]`
+ More info: https://clickhouse.tech/docs/en/interfaces/http/
+ minimum: 1
+ maximum: 65535
+ httpsPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ interserverHTTPPort:
+ type: integer
+ description: |
+ optional, setup `interserver_http_port` inside `clickhouse-server` settings for each Pod where current template will apply
+ if specified, should have equal value with `chi.spec.templates.podTemplates.spec.containers.ports[name=interserver]`
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#interserver-http-port
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in each `Pod` where this template applies; rendered into a `ConfigMap` mounted at `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ used for inter-pod affinity; see `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
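+ # Illustrative sketch (comment only, not part of the schema): a hypothetical
+ # podTemplate combining podDistribution with an overridden Pod.spec; the name,
+ # image tag and resource figures are assumptions:
+ #
+ #   podTemplates:
+ #     - name: clickhouse-pod
+ #       podDistribution:
+ #         - type: ShardAntiAffinity
+ #           topologyKey: kubernetes.io/hostname
+ #       spec:
+ #         containers:
+ #           - name: clickhouse
+ #             image: clickhouse/clickhouse-server:24.8
+ #             resources:
+ #               requests:
+ #                 cpu: "1"
+ #                 memory: 2Gi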
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows defining a template for rendering the `PVC` Kubernetes resource used inside a `Pod` to mount clickhouse `data`, clickhouse `logs` or anything else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`,
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
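+ # Illustrative sketch (comment only): a hypothetical volumeClaimTemplate that could
+ # be referenced via `defaults.templates.dataVolumeClaimTemplate`; the size is an assumption:
+ #
+ #   volumeClaimTemplates:
+ #     - name: data-volume
+ #       reclaimPolicy: Retain
+ #       spec:
+ #         accessModes:
+ #           - ReadWriteOnce
+ #         resources:
+ #           requests:
+ #             storage: 100Gi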
+ serviceTemplates:
+ type: array
+ description: |
+ allows defining a template for rendering a `Service` which gets its endpoints from Pods scoped at chi-wide, cluster-wide, shard-wide or replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Can be used to define Cloud-Provider-specific metadata which impacts the behavior of the Service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
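+ # Illustrative sketch (comment only): a hypothetical serviceTemplate exposing the
+ # HTTP and native TCP ports through a ClusterIP Service:
+ #
+ #   serviceTemplates:
+ #     - name: svc-template
+ #       spec:
+ #         type: ClusterIP
+ #         ports:
+ #           - name: http
+ #             port: 8123
+ #           - name: tcp
+ #             port: 9000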
+ useTemplates:
+ type: array
+ description: |
+ list of `ClickHouseInstallationTemplate` (chit) resource names which will be merged with the current `CHI`
+ manifest when rendering the Kubernetes resources that create the related ClickHouse clusters
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "name of `ClickHouseInstallationTemplate` (chit) resource"
+ namespace:
+ type: string
+ description: "Kubernetes namespace where need search `chit` resource, depending on `watchNamespaces` settings in `clickhouse-operator`"
+ useType:
+ type: string
+ description: "optional, current strategy is only merge, and current `chi` settings have more priority than merged template `chit`"
+ enum:
+ # List useTypeXXX constants from model
+ - ""
+ - "merge"
+
+ clickhousekeeperinstallations.yaml: |
+ # Template Parameters:
+ #
+ # OPERATOR_VERSION=0.25.6
+ #
+ apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ name: clickhousekeeperinstallations.clickhouse-keeper.altinity.com
+ labels:
+ clickhouse-keeper.altinity.com/chop: 0.25.6
+ spec:
+ group: clickhouse-keeper.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseKeeperInstallation
+ singular: clickhousekeeperinstallation
+ plural: clickhousekeeperinstallations
+ shortNames:
+ - chk
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: version
+ type: string
+ description: Operator version
+ priority: 1 # show in wide view
+ jsonPath: .status.chop-version
+ - name: clusters
+ type: integer
+ description: Clusters count
+ jsonPath: .status.clusters
+ - name: shards
+ type: integer
+ description: Shards count
+ priority: 1 # show in wide view
+ jsonPath: .status.shards
+ - name: hosts
+ type: integer
+ description: Hosts count
+ jsonPath: .status.hosts
+ - name: taskID
+ type: string
+ description: TaskID
+ priority: 1 # show in wide view
+ jsonPath: .status.taskID
+ - name: status
+ type: string
+ description: Resource status
+ jsonPath: .status.status
+ - name: hosts-unchanged
+ type: integer
+ description: Unchanged hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUnchanged
+ - name: hosts-updated
+ type: integer
+ description: Updated hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsUpdated
+ - name: hosts-added
+ type: integer
+ description: Added hosts count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsAdded
+ - name: hosts-completed
+ type: integer
+ description: Completed hosts count
+ jsonPath: .status.hostsCompleted
+ - name: hosts-deleted
+ type: integer
+ description: Hosts deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDeleted
+ - name: hosts-delete
+ type: integer
+ description: Hosts to be deleted count
+ priority: 1 # show in wide view
+ jsonPath: .status.hostsDelete
+ - name: endpoint
+ type: string
+ description: Client access endpoint
+ priority: 1 # show in wide view
+ jsonPath: .status.endpoint
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ - name: suspend
+ type: string
+ description: Suspend reconciliation
+ # Displayed in all priorities
+ jsonPath: .spec.suspend
+ subresources:
+ status: {}
+ schema:
+ openAPIV3Schema:
+ description: "define a set of Kubernetes resources (StatefulSet, PVC, Service, ConfigMap) which describe behavior one or more clusters"
+ type: object
+ required:
+ - spec
+ properties:
+ apiVersion:
+ description: |
+ APIVersion defines the versioned schema of this representation
+ of an object. Servers should convert recognized schemas to the latest
+ internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |
+ Kind is a string value representing the REST resource this
+ object represents. Servers may infer this from the endpoint the client
+ submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ status:
+ type: object
+ description: |
+ Status contains fields such as the normalized configuration, the clickhouse-operator version, the current action and the list of all applied actions, the current taskID and all applied taskIDs, and more
+ properties:
+ chop-version:
+ type: string
+ description: "Operator version"
+ chop-commit:
+ type: string
+ description: "Operator git commit SHA"
+ chop-date:
+ type: string
+ description: "Operator build date"
+ chop-ip:
+ type: string
+ description: "IP address of the operator's pod which managed this resource"
+ clusters:
+ type: integer
+ minimum: 0
+ description: "Clusters count"
+ shards:
+ type: integer
+ minimum: 0
+ description: "Shards count"
+ replicas:
+ type: integer
+ minimum: 0
+ description: "Replicas count"
+ hosts:
+ type: integer
+ minimum: 0
+ description: "Hosts count"
+ status:
+ type: string
+ description: "Status"
+ taskID:
+ type: string
+ description: "Current task id"
+ taskIDsStarted:
+ type: array
+ description: "Started task ids"
+ nullable: true
+ items:
+ type: string
+ taskIDsCompleted:
+ type: array
+ description: "Completed task ids"
+ nullable: true
+ items:
+ type: string
+ action:
+ type: string
+ description: "Action"
+ actions:
+ type: array
+ description: "Actions"
+ nullable: true
+ items:
+ type: string
+ error:
+ type: string
+ description: "Last error"
+ errors:
+ type: array
+ description: "Errors"
+ nullable: true
+ items:
+ type: string
+ hostsUnchanged:
+ type: integer
+ minimum: 0
+ description: "Unchanged Hosts count"
+ hostsUpdated:
+ type: integer
+ minimum: 0
+ description: "Updated Hosts count"
+ hostsAdded:
+ type: integer
+ minimum: 0
+ description: "Added Hosts count"
+ hostsCompleted:
+ type: integer
+ minimum: 0
+ description: "Completed Hosts count"
+ hostsDeleted:
+ type: integer
+ minimum: 0
+ description: "Deleted Hosts count"
+ hostsDelete:
+ type: integer
+ minimum: 0
+ description: "About to delete Hosts count"
+ pods:
+ type: array
+ description: "Pods"
+ nullable: true
+ items:
+ type: string
+ pod-ips:
+ type: array
+ description: "Pod IPs"
+ nullable: true
+ items:
+ type: string
+ fqdns:
+ type: array
+ description: "Pods FQDNs"
+ nullable: true
+ items:
+ type: string
+ endpoint:
+ type: string
+ description: "Endpoint"
+ endpoints:
+ type: array
+ description: "All endpoints"
+ nullable: true
+ items:
+ type: string
+ generation:
+ type: integer
+ minimum: 0
+ description: "Generation"
+ normalized:
+ type: object
+ description: "Normalized resource requested"
+ x-kubernetes-preserve-unknown-fields: true
+ normalizedCompleted:
+ type: object
+ description: "Normalized resource completed"
+ x-kubernetes-preserve-unknown-fields: true
+ hostsWithTablesCreated:
+ type: array
+ description: "List of hosts with tables created by the operator"
+ nullable: true
+ items:
+ type: string
+ hostsWithReplicaCaughtUp:
+ type: array
+ description: "List of hosts with replica caught up"
+ nullable: true
+ items:
+ type: string
+ usedTemplates:
+ type: array
+ description: "List of templates used to build this CHI"
+ nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ items:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ # x-kubernetes-preserve-unknown-fields: true
+ description: |
+ Specification of the desired behavior of one or more ClickHouse clusters
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md
+ properties:
+ taskID:
+ type: string
+ description: |
+ Allows to define custom taskID for CHI update and watch status of this update execution.
+ Displayed in all .status.taskID* fields.
+ By default (if not filled) every update of CHI manifest will generate random taskID
+ stop: &TypeStringBool
+ type: string
+ description: |
+ Allows to stop all ClickHouse clusters defined in a CHI.
+ Works as the following:
+ - When `stop` is `1` the operator sets `Replicas: 0` in each StatefulSet. This leads to all `Pods` and `Service`s being deleted. All PVCs are kept intact.
+ - When `stop` is `0` the operator sets `Replicas: 1`, `Pod`s and `Service`s are created again and all retained PVCs are attached to the `Pod`s.
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ suspend:
+ !!merge <<: *TypeStringBool
+ description: |
+ Suspend reconciliation of resources managed by a ClickHouse Keeper.
+ Works as the following:
+ - When `suspend` is `true` operator stops reconciling all resources.
+ - When `suspend` is `false` or not set, operator reconciles all resources.
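+ # Illustrative sketch (comment only): the two pause mechanisms side by side;
+ # the values are examples, both fields accept the StringBool forms listed above:
+ #
+ #   spec:
+ #     stop: "yes"       # scale StatefulSets to 0, keep PVCs
+ #     suspend: "true"   # skip reconciliation of managed resources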
+ namespaceDomainPattern:
+ type: string
+ description: |
+ Custom domain pattern which will be used for DNS names of `Service` or `Pod`.
+ Typical use scenario - custom cluster domain in Kubernetes cluster
+ Example: %s.svc.my.test
+ reconciling:
+ type: object
+ description: "Optional, allows tuning reconciling cycle for ClickhouseInstallation from clickhouse-operator side"
+ # nullable: true
+ properties:
+ policy:
+ type: string
+ description: |
+ DISCUSSED TO BE DEPRECATED
+ Syntax sugar
+ Overrides all three 'reconcile.host.wait.{exclude, queries, include}' values from the operator's config
+ Possible values:
+ - wait - should wait to exclude host, complete queries and include host back into the cluster
+ - nowait - should NOT wait to exclude host, complete queries and include host back into the cluster
+ enum:
+ - ""
+ - "wait"
+ - "nowait"
+ configMapPropagationTimeout:
+ type: integer
+ description: |
+ Timeout in seconds for `clickhouse-operator` to wait for modified `ConfigMap` to propagate into the `Pod`
+ More details: https://kubernetes.io/docs/concepts/configuration/configmap/#mounted-configmaps-are-updated-automatically
+ minimum: 0
+ maximum: 3600
+ cleanup:
+ type: object
+ description: "Optional, defines behavior for cleanup Kubernetes resources during reconcile cycle"
+ # nullable: true
+ properties:
+ unknownObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with found Kubernetes resources which should be managed by clickhouse-operator,
+ but do not have `ownerReference` to any currently managed `ClickHouseInstallation` resource.
+ Default behavior is `Delete`
+ # nullable: true
+ properties:
+ statefulSet: &TypeObjectsCleanup
+ type: string
+ description: "Behavior policy for unknown StatefulSet, `Delete` by default"
+ enum:
+ # List ObjectsCleanupXXX constants from model
+ - ""
+ - "Retain"
+ - "Delete"
+ pvc:
+ type: string
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown PVC, `Delete` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown ConfigMap, `Delete` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for unknown Service, `Delete` by default"
+ reconcileFailedObjects:
+ type: object
+ description: |
+ Describes what clickhouse-operator should do with Kubernetes resources which failed during reconcile.
+ Default behavior is `Retain`
+ # nullable: true
+ properties:
+ statefulSet:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed StatefulSet, `Retain` by default"
+ pvc:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed PVC, `Retain` by default"
+ configMap:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed ConfigMap, `Retain` by default"
+ service:
+ !!merge <<: *TypeObjectsCleanup
+ description: "Behavior policy for failed Service, `Retain` by default"
+ defaults:
+ type: object
+ description: |
+ defines default behavior for the whole ClickHouseInstallation; some behaviors can be re-defined at cluster, shard and replica level
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#specdefaults
+ # nullable: true
+ properties:
+ replicasUseFQDN:
+ !!merge <<: *TypeStringBool
+ description: |
+ defines whether replicas should be specified by FQDN in `<host></host>`.
+ In case of "no" the short hostname is used and clickhouse-server relies on the default Kubernetes DNS suffixes for lookup,
+ "no" by default
+ distributedDDL:
+ type: object
+ description: |
+ allows changing `<distributed_ddl>` settings
+ More info: https://clickhouse.tech/docs/en/operations/server-configuration-parameters/settings/#server-settings-distributed_ddl
+ # nullable: true
+ properties:
+ profile:
+ type: string
+ description: "Settings from this profile will be used to execute DDL queries"
+ storageManagement:
+ type: object
+ description: default storage management options
+ properties:
+ provisioner: &TypePVCProvisioner
+ type: string
+ description: "defines `PVC` provisioner - be it StatefulSet or the Operator"
+ enum:
+ - ""
+ - "StatefulSet"
+ - "Operator"
+ reclaimPolicy: &TypePVCReclaimPolicy
+ type: string
+ description: |
+ defines behavior of `PVC` deletion.
+ `Delete` by default, if `Retain` specified then `PVC` will be kept when deleting StatefulSet
+ enum:
+ - ""
+ - "Retain"
+ - "Delete"
+ templates: &TypeTemplateNames
+ type: object
+ description: "optional, configuration of the templates names which will use for generate Kubernetes resources according to one or more ClickHouse clusters described in current ClickHouseInstallation (chi) resource"
+ # nullable: true
+ properties:
+ hostTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.hostTemplates, which will apply to configure every `clickhouse-server` instance during render ConfigMap resources which will mount into `Pod`"
+ podTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.podTemplates, allows customization each `Pod` resource during render and reconcile each StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ dataVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ logVolumeClaimTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse log directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ serviceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates. used for customization of the `Service` resource, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ serviceTemplates:
+ type: array
+ description: "optional, template names from chi.spec.templates.serviceTemplates. used for customization of the `Service` resources, created by `clickhouse-operator` to cover all clusters in whole `chi` resource"
+ nullable: true
+ items:
+ type: string
+ clusterServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ shardServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each shard inside clickhouse cluster described in `chi.spec.configuration.clusters`"
+ replicaServiceTemplate:
+ type: string
+ description: "optional, template name from chi.spec.templates.serviceTemplates, allows customization for each `Service` resource which will created by `clickhouse-operator` which cover each replica inside each shard inside each clickhouse cluster described in `chi.spec.configuration.clusters`"
+ volumeClaimTemplate:
+ type: string
+ description: "optional, alias for dataVolumeClaimTemplate, template name from chi.spec.templates.volumeClaimTemplates, allows customization each `PVC` which will mount for clickhouse data directory in each `Pod` during render and reconcile every StatefulSet.spec resource described in `chi.spec.configuration.clusters`"
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ # nullable: true
+ properties:
+ settings: &TypeSettings
+ type: object
+ description: |
+ allows configuring multiple aspects of `clickhouse-keeper` instance behavior
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ files: &TypeFiles
+ type: object
+ description: |
+ allows defining the content of any settings file
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ clusters:
+ type: array
+ description: |
+ describes the clusters layout and allows changing settings at cluster level and replica level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "cluster name, used to identify set of servers and wide used during generate names of related Kubernetes resources"
+ minLength: 1
+ # See namePartClusterMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in each `Pod` of this one cluster; rendered into a `ConfigMap` mounted at `/etc/clickhouse-server/config.d/`
+ override top-level `chi.spec.configuration.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` on current cluster during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files`
+ pdbManaged:
+ !!merge <<: *TypeStringBool
+ description: |
+ Specifies whether the Pod Disruption Budget (PDB) should be managed.
+ During the next installation, if PDB management is enabled, the operator will
+ attempt to retrieve any existing PDB. If none is found, it will create a new one
+ and initiate a reconciliation loop. If PDB management is disabled, the existing PDB
+ will remain intact, and the reconciliation loop will not be executed. By default,
+ PDB management is enabled.
+ pdbMaxUnavailable:
+ type: integer
+ description: |
+ Pod eviction is allowed if at most "pdbMaxUnavailable" pods are unavailable after the eviction,
+ i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions
+ by specifying 0. This is a mutually exclusive setting with "minAvailable".
+ minimum: 0
+ maximum: 65535
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected cluster
+ override top-level `chi.spec.configuration.templates`
+ layout:
+ type: object
+ description: |
+ describes the current cluster layout: how many shards are in the cluster and how many replicas are in each shard
+ allows overriding settings on each shard and replica separately
+ # nullable: true
+ properties:
+ replicasCount:
+ type: integer
+ description: |
+ how many replicas of each shard of the current cluster will run in Kubernetes,
+ each replica is a separate `StatefulSet` which contains only one `Pod` with a `clickhouse-server` instance,
+ every shard contains 1 replica by default
+ replicas:
+ type: array
+ description: "optional, allows override top-level `chi.spec.configuration` and cluster-level `chi.spec.configuration.clusters` configuration for each replica and each shard relates to selected replica, use it only if you fully understand what you do"
+ # nullable: true
+ items:
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default replica name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartShardMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings for the `Pod` of this one replica; rendered into a `ConfigMap` mounted at `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and will ignore if shard-level `chi.spec.configuration.clusters.layout.shards` present
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`
+ shardsCount:
+ type: integer
+ description: "optional, count of shards related to current replica, you can override each shard behavior on low-level `chi.spec.configuration.clusters.layout.replicas.shards`"
+ minimum: 1
+ shards:
+ type: array
+ description: "optional, list of shards related to current replica, will ignore if `chi.spec.configuration.clusters.layout.shards` presents"
+ # nullable: true
+ items:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "optional, by default shard name is generated, but you can override it and setup custom name"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zkPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ raftPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings for the `Pod` of this one shard of the current replica; rendered into a `ConfigMap` mounted at `/etc/clickhouse-server/conf.d/`
+ override top-level `chi.spec.configuration.settings`, cluster-level `chi.spec.configuration.clusters.settings` and replica-level `chi.spec.configuration.clusters.layout.replicas.settings`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` only in one shard related to current replica during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ override top-level `chi.spec.configuration.files` and cluster-level `chi.spec.configuration.clusters.files`, will ignore if `chi.spec.configuration.clusters.layout.shards` presents
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: |
+ optional, configuration of the templates names which will use for generate Kubernetes resources according to selected replica
+ override top-level `chi.spec.configuration.templates`, cluster-level `chi.spec.configuration.clusters.templates`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates`
+ templates:
+ type: object
+ description: "allows define templates which will use for render Kubernetes resources like StatefulSet, ConfigMap, Service, PVC, by default, clickhouse-operator have own templates, but you can override it"
+ # nullable: true
+ properties:
+ hostTemplates:
+ type: array
+ description: "hostTemplate will use during apply to generate `clickhose-server` config files"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.hostTemplate`, cluster-level `chi.spec.configuration.clusters.templates.hostTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.hostTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.hostTemplate`"
+ type: string
+ portDistribution:
+ type: array
+ description: "define how will distribute numeric values of named ports in `Pod.spec.containers.ports` and clickhouse-server configs"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "type of distribution, when `Unspecified` (default value) then all listen ports on clickhouse-server configuration in all Pods will have the same value, when `ClusterScopeIndex` then ports will increment to offset from base value depends on shard and replica index inside cluster with combination of `chi.spec.templates.podTemlates.spec.HostNetwork` it allows setup ClickHouse cluster inside Kubernetes and provide access via external network bypass Kubernetes internal network"
+ enum:
+ # List PortDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClusterScopeIndex"
+ spec:
+ # Host
+ type: object
+ properties:
+ name:
+ type: string
+ description: "by default, hostname will generate, but this allows define custom name for each `clickhouse-server`"
+ minLength: 1
+ # See namePartReplicaMaxLen const
+ maxLength: 15
+ pattern: "^[a-zA-Z0-9-]{0,15}$"
+ zkPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ raftPort:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ settings:
+ !!merge <<: *TypeSettings
+ description: |
+ optional, allows configuring `clickhouse-server` settings in each `Pod` where this template applies; rendered into a `ConfigMap` mounted at `/etc/clickhouse-server/conf.d/`
+ More details: https://clickhouse.tech/docs/en/operations/settings/settings/
+ files:
+ !!merge <<: *TypeFiles
+ description: |
+ optional, allows define content of any setting file inside each `Pod` where this template will apply during generate `ConfigMap` which will mount in `/etc/clickhouse-server/config.d/` or `/etc/clickhouse-server/conf.d/` or `/etc/clickhouse-server/users.d/`
+ templates:
+ !!merge <<: *TypeTemplateNames
+ description: "be careful, this part of CRD allows override template inside template, don't use it if you don't understand what you do"
+ podTemplates:
+ type: array
+ description: |
+ podTemplate will use during render `Pod` inside `StatefulSet.spec` and allows define rendered `Pod.spec`, pod scheduling distribution and pod zone
+ More information: https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatespodtemplates
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ properties:
+ name:
+ type: string
+ description: "template name, could use to link inside top-level `chi.spec.defaults.templates.podTemplate`, cluster-level `chi.spec.configuration.clusters.templates.podTemplate`, shard-level `chi.spec.configuration.clusters.layout.shards.temlates.podTemplate`, replica-level `chi.spec.configuration.clusters.layout.replicas.templates.podTemplate`"
+ generateName:
+ type: string
+ description: "allows define format for generated `Pod` name, look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates for details about available template variables"
+ zone:
+ type: object
+ description: "allows define custom zone name and will separate ClickHouse `Pods` between nodes, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ #required:
+ # - values
+ properties:
+ key:
+ type: string
+ description: "optional, if defined, allows select kubernetes nodes by label with `name` equal `key`"
+ values:
+ type: array
+ description: "optional, if defined, allows select kubernetes nodes by label with `value` in `values`"
+ # nullable: true
+ items:
+ type: string
+ distribution:
+ type: string
+ description: "DEPRECATED, shortcut for `chi.spec.templates.podTemplates.spec.affinity.podAntiAffinity`"
+ enum:
+ - ""
+ - "Unspecified"
+ - "OnePerHost"
+ podDistribution:
+ type: array
+ description: "define ClickHouse Pod distribution policy between Kubernetes Nodes inside Shard, Replica, Namespace, CHI, another ClickHouse cluster"
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - type
+ properties:
+ type:
+ type: string
+ description: "you can define multiple affinity policy types"
+ enum:
+ # List PodDistributionXXX constants
+ - ""
+ - "Unspecified"
+ - "ClickHouseAntiAffinity"
+ - "ShardAntiAffinity"
+ - "ReplicaAntiAffinity"
+ - "AnotherNamespaceAntiAffinity"
+ - "AnotherClickHouseInstallationAntiAffinity"
+ - "AnotherClusterAntiAffinity"
+ - "MaxNumberPerNode"
+ - "NamespaceAffinity"
+ - "ClickHouseInstallationAffinity"
+ - "ClusterAffinity"
+ - "ShardAffinity"
+ - "ReplicaAffinity"
+ - "PreviousTailAffinity"
+ - "CircularReplication"
+ scope:
+ type: string
+ description: "scope for apply each podDistribution"
+ enum:
+ # list PodDistributionScopeXXX constants
+ - ""
+ - "Unspecified"
+ - "Shard"
+ - "Replica"
+ - "Cluster"
+ - "ClickHouseInstallation"
+ - "Namespace"
+ number:
+ type: integer
+ description: "define, how much ClickHouse Pods could be inside selected scope with selected distribution type"
+ minimum: 0
+ maximum: 65535
+ topologyKey:
+ type: string
+ description: |
+ used for inter-pod affinity; see `pod.spec.affinity.podAntiAffinity.preferredDuringSchedulingIgnoredDuringExecution.podAffinityTerm.topologyKey`,
+ more info: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity
+ metadata:
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Pod
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify PodSpec
+ type: object
+ description: "allows define whole Pod.spec inside StaefulSet.spec, look to https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates for details"
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ volumeClaimTemplates:
+ type: array
+ description: |
+ allows defining a template for rendering the `PVC` Kubernetes resource used inside a `Pod` to mount clickhouse `data`, clickhouse `logs` or anything else
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from
+ top-level `chi.spec.defaults.templates.dataVolumeClaimTemplate` or `chi.spec.defaults.templates.logVolumeClaimTemplate`,
+ cluster-level `chi.spec.configuration.clusters.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.templates.logVolumeClaimTemplate`,
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.shards.templates.logVolumeClaimTemplate`,
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.dataVolumeClaimTemplate` or `chi.spec.configuration.clusters.layout.replicas.templates.logVolumeClaimTemplate`
+ provisioner: *TypePVCProvisioner
+ reclaimPolicy: *TypePVCReclaimPolicy
+ metadata:
+ type: object
+ description: |
+ allows to pass standard object's metadata from template to PVC
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ allows define all aspects of `PVC` resource
+ More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#persistentvolumeclaims
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ serviceTemplates:
+ type: array
+ description: |
+ allows defining a template for rendering a `Service` which gets its endpoints from Pods scoped at chi-wide, cluster-wide, shard-wide or replica-wide level
+ # nullable: true
+ items:
+ type: object
+ #required:
+ # - name
+ # - spec
+ properties:
+ name:
+ type: string
+ description: |
+ template name, can be referenced from
+ chi-level `chi.spec.defaults.templates.serviceTemplate`
+ cluster-level `chi.spec.configuration.clusters.templates.clusterServiceTemplate`
+ shard-level `chi.spec.configuration.clusters.layout.shards.templates.shardServiceTemplate`
+ replica-level `chi.spec.configuration.clusters.layout.replicas.templates.replicaServiceTemplate` or `chi.spec.configuration.clusters.layout.shards.replicas.replicaServiceTemplate`
+ generateName:
+ type: string
+ description: |
+ allows define format for generated `Service` name,
+ look to https://github.com/Altinity/clickhouse-operator/blob/master/docs/custom_resource_explained.md#spectemplatesservicetemplates
+ for details about available template variables
+ metadata:
+ # TODO specify ObjectMeta
+ type: object
+ description: |
+ allows pass standard object's metadata from template to Service
+ Can be used to define Cloud-Provider-specific metadata which impacts the behavior of the Service
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ # TODO specify ServiceSpec
+ type: object
+ description: |
+ describe behavior of generated Service
+ More info: https://kubernetes.io/docs/concepts/services-networking/service/
+ # nullable: true
+ x-kubernetes-preserve-unknown-fields: true
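+ # Illustrative sketch (comment only, not part of the generated CRD): a minimal
+ # ClickHouseKeeperInstallation with a single three-replica cluster; the resource
+ # name and cluster name are assumptions:
+ #
+ #   apiVersion: clickhouse-keeper.altinity.com/v1
+ #   kind: ClickHouseKeeperInstallation
+ #   metadata:
+ #     name: keeper
+ #   spec:
+ #     configuration:
+ #       clusters:
+ #         - name: default
+ #           layout:
+ #             replicasCount: 3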
+
+ clickhouseoperatorconfigurations.yaml: |
+ # Template Parameters:
+ #
+ # NONE
+ #
+ apiVersion: apiextensions.k8s.io/v1
+ kind: CustomResourceDefinition
+ metadata:
+ name: clickhouseoperatorconfigurations.clickhouse.altinity.com
+ labels:
+ clickhouse.altinity.com/chop: 0.25.6
+ spec:
+ group: clickhouse.altinity.com
+ scope: Namespaced
+ names:
+ kind: ClickHouseOperatorConfiguration
+ singular: clickhouseoperatorconfiguration
+ plural: clickhouseoperatorconfigurations
+ shortNames:
+ - chopconf
+ versions:
+ - name: v1
+ served: true
+ storage: true
+ additionalPrinterColumns:
+ - name: namespaces
+ type: string
+ description: Watch namespaces
+ jsonPath: .status
+ - name: age
+ type: date
+ description: Age of the resource
+ # Displayed in all priorities
+ jsonPath: .metadata.creationTimestamp
+ schema:
+ openAPIV3Schema:
+ type: object
+ description: "allows customize `clickhouse-operator` settings, need restart clickhouse-operator pod after adding, more details https://github.com/Altinity/clickhouse-operator/blob/master/docs/operator_configuration.md"
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ status:
+ type: object
+ x-kubernetes-preserve-unknown-fields: true
+ spec:
+ type: object
+ description: |
+ Allows to define settings of the clickhouse-operator.
+ More info: https://github.com/Altinity/clickhouse-operator/blob/master/config/config.yaml
+ Check into etc-clickhouse-operator* ConfigMaps if you need more control
+ x-kubernetes-preserve-unknown-fields: true
+ properties:
+ watch:
+ type: object
+ description: "Parameters for watch kubernetes resources which used by clickhouse-operator deployment"
+ properties:
+ namespaces:
+ type: object
+ description: "List of namespaces where clickhouse-operator watches for events."
+ x-kubernetes-preserve-unknown-fields: true
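+ # Illustrative sketch (comment only): a hypothetical ClickHouseOperatorConfiguration
+ # restricting the operator to two namespaces; the names are assumptions:
+ #
+ #   apiVersion: clickhouse.altinity.com/v1
+ #   kind: ClickHouseOperatorConfiguration
+ #   metadata:
+ #     name: chop-config
+ #   spec:
+ #     watch:
+ #       namespaces: ["reports", "staging"]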
+ clickhouse:
+ type: object
+ description: "Clickhouse related parameters used by clickhouse-operator"
+ properties:
+ configuration:
+ type: object
+ properties:
+ file:
+ type: object
+ properties:
+ path:
+ type: object
+ description: |
+ Each 'path' can be either absolute or relative.
+ In case path is absolute - it is used as is.
+ In case path is relative - it is relative to the folder where configuration file you are reading right now is located.
+ properties:
+ common:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
+ Default value - config.d
+ host:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
+ Default value - conf.d
+ user:
+ type: string
+ description: |
+ Path to the folder where ClickHouse configuration files with users settings are located.
+ Files are common for all instances within a CHI.
+ Default value - users.d
+ user:
+ type: object
+ description: "Default parameters for any user which will create"
+ properties:
+ default:
+ type: object
+ properties:
+ profile:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ quota:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ networksIP:
+ type: array
+ description: "ClickHouse server configuration `...` for any "
+ items:
+ type: string
+ password:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ network:
+ type: object
+ description: "Default network parameters for any user which will create"
+ properties:
+ hostRegexpTemplate:
+ type: string
+ description: "ClickHouse server configuration `...` for any "
+ configurationRestartPolicy:
+ type: object
+ description: "Configuration restart policy describes what configuration changes require ClickHouse restart"
+ properties:
+ rules:
+ type: array
+ description: "Array of set of rules per specified ClickHouse versions"
+ items:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "ClickHouse version expression"
+ rules:
+ type: array
+ description: "Set of configuration rules for specified ClickHouse version"
+ items:
+ type: object
+ description: "setting: value pairs for configuration restart policy"
+ x-kubernetes-preserve-unknown-fields: true
+ access:
+ type: object
+ description: "parameters which use for connect to clickhouse from clickhouse-operator deployment"
+ properties:
+ scheme:
+ type: string
+ description: "The scheme to user for connecting to ClickHouse. Possible values: http, https, auto"
+ username:
+ type: string
+ description: "ClickHouse username to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ password:
+ type: string
+ description: "ClickHouse password to be used by operator to connect to ClickHouse instances, deprecated, use chCredentialsSecretName"
+ rootCA:
+ type: string
+ description: "Root certificate authority that clients use when verifying server certificates. Used for https connection to ClickHouse"
+ secret:
+ type: object
+ properties:
+ namespace:
+ type: string
+ description: "Location of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ name:
+ type: string
+ description: "Name of k8s Secret with username and password to be used by operator to connect to ClickHouse instances"
+ port:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "Port to be used by operator to connect to ClickHouse instances"
+ timeouts:
+ type: object
+ description: "Timeouts used to limit connection and queries from the operator to ClickHouse instances, In seconds"
+ properties:
+ connect:
+ type: integer
+ minimum: 1
+ maximum: 10
+ description: "Timout to setup connection from the operator to ClickHouse instances. In seconds."
+ query:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: "Timout to perform SQL query from the operator to ClickHouse instances. In seconds."
+ addons:
+ type: object
+ description: "Configuration addons specifies additional settings"
+ properties:
+ rules:
+ type: array
+ description: "Array of set of rules per specified ClickHouse versions"
+ items:
+ type: object
+ properties:
+ version:
+ type: string
+ description: "ClickHouse version expression"
+ spec:
+ type: object
+ description: "spec"
+ properties:
+ configuration:
+ type: object
+ description: "allows configure multiple aspects and behavior for `clickhouse-server` instance and also allows describe multiple `clickhouse-server` clusters inside one `chi` resource"
+ properties:
+ users:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ profiles:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ quotas:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ settings:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ files:
+ type: object
+ description: "see same section from CR spec"
+ x-kubernetes-preserve-unknown-fields: true
+ metrics:
+ type: object
+ description: "parameters which use for connect to fetch metrics from clickhouse by clickhouse-operator"
+ properties:
+ timeouts:
+ type: object
+ description: |
+ Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
+ Specified in seconds.
+ properties:
+ collect:
+ type: integer
+ minimum: 1
+ maximum: 600
+ description: |
+ Timeout used to limit metrics collection request. In seconds.
+ Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
+ All collected metrics are returned.
+ template:
+ type: object
+ description: "Parameters which are used if you want to generate ClickHouseInstallationTemplate custom resources from files which are stored inside clickhouse-operator deployment"
+ properties:
+ chi:
+ type: object
+ properties:
+ policy:
+ type: string
+ description: |
+ CHI template updates handling policy
+ Possible policy values:
+ - ReadOnStart. Accept CHIT updates on operator start only.
+ - ApplyOnNextReconcile. Accept CHIT updates at all times. Apply new CHITs on the next regular reconcile of the CHI
+ enum:
+ - ""
+ - "ReadOnStart"
+ - "ApplyOnNextReconcile"
+ path:
+ type: string
+ description: "Path to folder where ClickHouseInstallationTemplate .yaml manifests are located."
+ reconcile:
+ type: object
+ description: "allow tuning reconciling process"
+ properties:
+ runtime:
+ type: object
+ description: "runtime parameters for clickhouse-operator process which are used during reconcile cycle"
+ properties:
+ reconcileCHIsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile CHIs in parallel, 10 by default"
+ reconcileShardsThreadsNumber:
+ type: integer
+ minimum: 1
+ maximum: 65535
+ description: "How many goroutines will be used to reconcile shards of a cluster in parallel, 1 by default"
+ reconcileShardsMaxConcurrencyPercent:
+ type: integer
+ minimum: 0
+ maximum: 100
+ description: "The maximum percentage of cluster shards that may be reconciled in parallel, 50 percent by default."
+ statefulSet:
+ type: object
+ description: "Allow change default behavior for reconciling StatefulSet which generated by clickhouse-operator"
+ properties:
+ create:
+ type: object
+ description: "Behavior during create StatefulSet"
+ properties:
+ onFailure:
+ type: string
+ description: |
+ What to do in case created StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. delete - delete newly created problematic StatefulSet.
+ 3. ignore (default) - ignore error, pretend nothing happened and move on to the next StatefulSet.
+ update:
+ type: object
+ description: "Behavior during update StatefulSet"
+ properties:
+ timeout:
+ type: integer
+ description: "How many seconds to wait for created/updated StatefulSet to be Ready"
+ pollInterval:
+ type: integer
+ description: "How many seconds to wait between checks for created/updated StatefulSet status"
+ onFailure:
+ type: string
+ description: |
+ What to do in case updated StatefulSet is not in Ready after `statefulSetUpdateTimeout` seconds
+ Possible options:
+ 1. abort - do nothing, just break the process and wait for admin.
+ 2. rollback (default) - delete Pod and rollback StatefulSet to previous Generation. Pod would be recreated by StatefulSet based on rollback-ed configuration.
+ 3. ignore - ignore error, pretend nothing happened and move on to the next StatefulSet.
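+ # Illustrative sketch (comment only): overriding the StatefulSet reconcile behavior;
+ # the numeric values are assumptions, not operator defaults:
+ #
+ #   spec:
+ #     reconcile:
+ #       statefulSet:
+ #         create:
+ #           onFailure: delete
+ #         update:
+ #           timeout: 300
+ #           pollInterval: 5
+ #           onFailure: rollback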
+ host:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should wait for a ClickHouse host:
+ - to be excluded from a ClickHouse cluster
+ - to complete all running queries
+ - to be included into a ClickHouse cluster
+ respectively, before moving forward
+ properties:
+ wait:
+ type: object
+ properties:
+ exclude: &TypeStringBool
+ type: string
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be excluded from a ClickHouse cluster"
+ enum:
+ # List StringBoolXXX constants from model
+ - ""
+ - "0"
+ - "1"
+ - "False"
+ - "false"
+ - "True"
+ - "true"
+ - "No"
+ - "no"
+ - "Yes"
+ - "yes"
+ - "Off"
+ - "off"
+ - "On"
+ - "on"
+ - "Disable"
+ - "disable"
+ - "Enable"
+ - "enable"
+ - "Disabled"
+ - "disabled"
+ - "Enabled"
+ - "enabled"
+ queries:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to complete all running queries"
+ include:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for a ClickHouse host to be included into a ClickHouse cluster"
+ replicas:
+ type: object
+ description: "Whether the operator during reconcile procedure should wait for replicas to catch-up"
+ properties:
+ all:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for all replicas to catch-up"
+ new:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator during reconcile procedure should wait for new replicas to catch-up"
+ delay:
+ type: integer
+ description: "replication max absolute delay to consider replica is not delayed"
+ probes:
+ type: object
+ description: "What probes the operator should wait during host launch procedure"
+ properties:
+ startup:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during the host launch procedure should wait for the startup probe to succeed.
+ If the probe is unspecified, the wait is assumed to have completed successfully.
+ The default is to not wait.
+ readiness:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during the host launch procedure should wait for the readiness probe to succeed.
+ If the probe is unspecified, the wait is assumed to have completed successfully.
+ The default is to wait.
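+ # Illustrative sketch only (not emitted by the chart): the host wait options
+ # described above could be set roughly like this; values are examples only:
+ #   host:
+ #     wait:
+ #       exclude: "true"
+ #       queries: "true"
+ #       include: "false"
+ #       replicas:
+ #         new: "true"
+ #         delay: 10
+ #       probes:
+ #         readiness: "true"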
+ drop:
+ type: object
+ properties:
+ replicas:
+ type: object
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted or recreated
+ properties:
+ onDelete:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica is deleted
+ onLostVolume:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop replicas when replica volume is lost
+ active:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
+ annotation:
+ type: object
+ description: "defines which metadata.annotations items will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating annotations from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ include annotations with names from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ description: |
+ When propagating annotations from the chi's `metadata.annotations` section to child objects' `metadata.annotations`,
+ exclude annotations with names from the following list
+ items:
+ type: string
+ label:
+ type: object
+ description: "defines which metadata.labels will include or exclude during render StatefulSet, Pod, PVC resources"
+ properties:
+ include:
+ type: array
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ include labels from the following list
+ items:
+ type: string
+ exclude:
+ type: array
+ items:
+ type: string
+ description: |
+ When propagating labels from the chi's `metadata.labels` section to child objects' `metadata.labels`,
+ exclude labels from the following list
+ appendScope:
+ !!merge <<: *TypeStringBool
+ description: |
+ Whether to append *Scope* labels to StatefulSet and Pod
+ - "LabelShardScopeIndex"
+ - "LabelReplicaScopeIndex"
+ - "LabelCHIScopeIndex"
+ - "LabelCHIScopeCycleSize"
+ - "LabelCHIScopeCycleIndex"
+ - "LabelCHIScopeCycleOffset"
+ - "LabelClusterScopeIndex"
+ - "LabelClusterScopeCycleSize"
+ - "LabelClusterScopeCycleIndex"
+ - "LabelClusterScopeCycleOffset"
+ metrics:
+ type: object
+ description: "defines metrics exporter options"
+ properties:
+ labels:
+ type: object
+ description: "defines metric labels options"
+ properties:
+ exclude:
+ type: array
+ description: |
+ When adding labels to a metric, exclude labels with names from the following list
+ items:
+ type: string
+ status:
+ type: object
+ description: "defines status options"
+ properties:
+ fields:
+ type: object
+ description: "defines status fields options"
+ properties:
+ action:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'action'"
+ actions:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'actions'"
+ error:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'error'"
+ errors:
+ !!merge <<: *TypeStringBool
+ description: "Whether the operator should fill status field 'errors'"
+ statefulSet:
+ type: object
+ description: "define StatefulSet-specific parameters"
+ properties:
+ revisionHistoryLimit:
+ type: integer
+ description: "revisionHistoryLimit is the maximum number of revisions that will be\nmaintained in the StatefulSet's revision history. \nLook details in `statefulset.spec.revisionHistoryLimit`\n"
+ pod:
+ type: object
+ description: "define pod specific parameters"
+ properties:
+ terminationGracePeriod:
+ type: integer
+ description: "Optional duration in seconds the pod needs to terminate gracefully. \nLook details in `pod.spec.terminationGracePeriodSeconds`\n"
+ logger:
+ type: object
+ description: "allow setup clickhouse-operator logger behavior"
+ properties:
+ logtostderr:
+ type: string
+ description: "boolean, allows logs to stderr"
+ alsologtostderr:
+ type: string
+ description: "boolean allows logs to stderr and files both"
+ v:
+ type: string
+ description: "verbosity level of clickhouse-operator log, default - 1 max - 9"
+ stderrthreshold:
+ type: string
+ vmodule:
+ type: string
+ description: |
+ Comma-separated list of filename=N, where filename (can be a pattern) must have no .go ext, and N is a V level.
+ Ex.: file*=2 sets the 'V' to 2 in all files with names like file*.
+ log_backtrace_at:
+ type: string
+ description: |
+ It can be set to a file and line number that contains a logging statement.
+ Ex.: file.go:123
+ Each time this line is executed, a stack trace will be written to the Info log.
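+ # Illustrative sketch only (not emitted by the chart): a logger section using the
+ # fields described above might look like this; values are examples only:
+ #   logger:
+ #     logtostderr: "true"
+ #     alsologtostderr: "false"
+ #     v: "1"
+ #     stderrthreshold: ""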
+---
+# Source: altinity-clickhouse-operator/templates/hooks/crd-install-rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRole
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-crd-install
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: crd-install-hook
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-weight": "-6"
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+rules:
+- apiGroups:
+ - apiextensions.k8s.io
+ resources:
+ - customresourcedefinitions
+ verbs:
+ - get
+ - list
+ - create
+ - update
+ - patch
+---
+# Source: altinity-clickhouse-operator/templates/hooks/crd-install-rbac.yaml
+apiVersion: rbac.authorization.k8s.io/v1
+kind: ClusterRoleBinding
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-crd-install
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: crd-install-hook
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-weight": "-6"
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: ClusterRole
+ name: ch-operator-altinity-clickhouse-operator-crd-install
+subjects:
+- kind: ServiceAccount
+ name: ch-operator-altinity-clickhouse-operator-crd-install
+ namespace: reports-clickhouse
+---
+# Source: altinity-clickhouse-operator/templates/hooks/crd-install-job.yaml
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: ch-operator-altinity-clickhouse-operator-crd-install
+ namespace: reports-clickhouse
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: crd-install-hook
+ annotations:
+ "helm.sh/hook": pre-install,pre-upgrade
+ "helm.sh/hook-weight": "-5"
+ "helm.sh/hook-delete-policy": before-hook-creation,hook-succeeded
+spec:
+ template:
+ metadata:
+ name: ch-operator-altinity-clickhouse-operator-crd-install
+ labels:
+ helm.sh/chart: altinity-clickhouse-operator-0.25.6
+ app.kubernetes.io/name: altinity-clickhouse-operator
+ app.kubernetes.io/instance: ch-operator
+ app.kubernetes.io/version: "0.25.6"
+ app.kubernetes.io/managed-by: Helm
+ app.kubernetes.io/component: crd-install-hook
+ spec:
+ serviceAccountName: ch-operator-altinity-clickhouse-operator-crd-install
+ restartPolicy: OnFailure
+ containers:
+ - name: crd-install
+ image: "bitnami/kubectl:latest"
+ imagePullPolicy: IfNotPresent
+ command:
+ - /bin/sh
+ - -c
+ - |
+ set -e
+ echo "Installing/Updating ClickHouse Operator CRDs..."
+ for crd_file in /crds/*.yaml; do
+ echo "Applying $(basename $crd_file)..."
+ kubectl apply --server-side=true --force-conflicts -f "$crd_file"
+ done
+ echo "CRD installation completed successfully"
+ volumeMounts:
+ - name: crds
+ mountPath: /crds
+ readOnly: true
+ volumes:
+ - name: crds
+ configMap:
+ name: ch-operator-altinity-clickhouse-operator-crds
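+# Note (illustrative, not part of the generated manifest): the hook job above applies
+# the CRDs with server-side apply; a quick manual check that they were installed could
+# be something like `kubectl get crd | grep clickhouse.altinity.com`.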
diff --git a/clickhouse/base/kustomization.yaml b/clickhouse/base/kustomization.yaml
new file mode 100644
index 0000000..e45664b
--- /dev/null
+++ b/clickhouse/base/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- clickhouse-operator.yaml
diff --git a/clickhouse/local/kustomization.yaml b/clickhouse/local/kustomization.yaml
new file mode 100644
index 0000000..1f5c3aa
--- /dev/null
+++ b/clickhouse/local/kustomization.yaml
@@ -0,0 +1,2 @@
+resources:
+- ../app