# namespaceOverride -- override the namespace that operator resources are installed into (defaults to the release namespace)
namespaceOverride: ""
# commonLabels -- set of labels that will be applied to all the resources for the operator
commonLabels: {}
# commonAnnotations -- set of annotations that will be applied to all the resources for the operator
commonAnnotations: {}
deployment:
  # see `kubectl explain deployment.spec.strategy` for details
  strategy:
    type: Recreate
crdHook:
  # crdHook.enabled -- enable automatic CRD installation/update via pre-install/pre-upgrade hooks;
  # when disabled, CRDs must be installed manually using kubectl apply
  enabled: true
  image:
    # crdHook.image.repository -- image repository for CRD installation job
    repository: bitnami/kubectl
    # crdHook.image.tag -- image tag for CRD installation job
    tag: "latest"
    # crdHook.image.pullPolicy -- image pull policy for CRD installation job
    pullPolicy: IfNotPresent
  # crdHook.resources -- resource limits and requests for CRD installation job
  resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  # crdHook.nodeSelector -- node selector for CRD installation job
  nodeSelector: {}
  # crdHook.tolerations -- tolerations for CRD installation job
  tolerations: []
  # crdHook.affinity -- affinity for CRD installation job
  affinity: {}
operator:
  image:
    # operator.image.repository -- image repository
    repository: altinity/clickhouse-operator
    # operator.image.tag -- image tag (chart's appVersion value will be used if not set)
    tag: ""
    # operator.image.pullPolicy -- image pull policy
    pullPolicy: IfNotPresent
  containerSecurityContext: {}
  # operator.resources -- custom resource configuration, check `kubectl explain pod.spec.containers.resources` for details
  resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  # operator.priorityClassName -- priority class name for the clickhouse-operator deployment, check `kubectl explain pod.spec.priorityClassName` for details
  # @default -- ""
  priorityClassName: ""
  # operator.env -- additional environment variables for the clickhouse-operator container in the deployment
  # possible value format: `[{"name": "SAMPLE", "value": "text"}]`
  env: []
metrics:
  enabled: true
  image:
    # metrics.image.repository -- image repository
    repository: altinity/metrics-exporter
    # metrics.image.tag -- image tag (chart's appVersion value will be used if not set)
    tag: ""
    # metrics.image.pullPolicy -- image pull policy
    pullPolicy: IfNotPresent
  containerSecurityContext: {}
  # metrics.resources -- custom resource configuration
  resources: {}
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi
  # metrics.env -- additional environment variables for the metrics-exporter container in the deployment
  # possible value format: `[{"name": "SAMPLE", "value": "text"}]`
  env: []
# imagePullSecrets -- image pull secrets for private images in the clickhouse-operator pod
# possible value format: `[{"name":"your-secret-name"}]`,
# check `kubectl explain pod.spec.imagePullSecrets` for details
imagePullSecrets: []
# podLabels -- labels to add to the clickhouse-operator pod
podLabels: {}
# podAnnotations -- annotations to add to the clickhouse-operator pod, check `kubectl explain pod.metadata.annotations` for details
# @default -- check the `values.yaml` file
podAnnotations:
  prometheus.io/port: '8888'
  prometheus.io/scrape: 'true'
  clickhouse-operator-metrics/port: '9999'
  clickhouse-operator-metrics/scrape: 'true'
# nameOverride -- override the name of the chart
nameOverride: ""
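# A commented-out sketch (not applied by default) of pinning the operator image and
# passing extra environment variables in the format documented above. The tag and the
# variable name are placeholders for illustration only - substitute your own:
# operator:
#   image:
#     tag: "0.24.0"            # sample tag, pick the release you actually run
#   env:
#     - name: SAMPLE           # hypothetical variable, shown only to illustrate the format
#       value: "text"
# imagePullSecrets:
#   - name: my-registry-secret # placeholder secret name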
# fullnameOverride -- override the full name of the chart
fullnameOverride: ""
serviceAccount:
  # serviceAccount.create -- specifies whether a service account should be created
  create: true
  # serviceAccount.annotations -- annotations to add to the service account
  annotations: {}
  # serviceAccount.name -- the name of the service account to use; if not set and create is true, a name is generated using the fullname template
  name:
rbac:
  # rbac.create -- specifies whether rbac resources should be created
  create: true
  # rbac.namespaceScoped -- specifies whether to create roles and rolebindings at the namespace level (true) rather than at the cluster level (false)
  namespaceScoped: false
secret:
  # secret.create -- create a secret with operator credentials
  create: true
  # secret.username -- operator credentials username
  username: clickhouse_operator
  # secret.password -- operator credentials password
  password: clickhouse_operator_password
# nodeSelector -- node selector for operator pod assignment, check `kubectl explain pod.spec.nodeSelector` for details
nodeSelector: {}
# tolerations -- tolerations for operator pod assignment, check `kubectl explain pod.spec.tolerations` for details
tolerations: []
# affinity -- affinity for operator pod assignment, check `kubectl explain pod.spec.affinity` for details
affinity: {}
# podSecurityContext -- operator deployment SecurityContext, check `kubectl explain pod.spec.securityContext` for details
podSecurityContext: {}
# topologySpreadConstraints -- topology spread constraints for operator pod assignment, check `kubectl explain pod.spec.topologySpreadConstraints` for details
topologySpreadConstraints: []
serviceMonitor:
  # serviceMonitor.enabled -- create a ServiceMonitor custom resource for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator).
  # The ServiceMonitor defines two endpoints: ch-metrics on port 8888 and op-metrics on port 9999.
  # You can specify interval, scrapeTimeout, relabelings and metricRelabelings for each endpoint below.
  enabled: false
  # serviceMonitor.additionalLabels -- additional labels for the service monitor
  additionalLabels: {}
  clickhouseMetrics:
    # serviceMonitor.clickhouseMetrics.interval -- Prometheus scrape interval for the ch-metrics endpoint
    interval: 30s
    # serviceMonitor.clickhouseMetrics.scrapeTimeout -- Prometheus ServiceMonitor scrapeTimeout for the ch-metrics endpoint.
    # If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value, in which case the latter is used.
    scrapeTimeout: ""
    # serviceMonitor.clickhouseMetrics.relabelings -- Prometheus [RelabelConfigs] to apply to samples before scraping
    relabelings: []
    # serviceMonitor.clickhouseMetrics.metricRelabelings -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
    metricRelabelings: []
  operatorMetrics:
    # serviceMonitor.operatorMetrics.interval -- Prometheus scrape interval for the op-metrics endpoint
    interval: 30s
    # serviceMonitor.operatorMetrics.scrapeTimeout -- Prometheus ServiceMonitor scrapeTimeout for the op-metrics endpoint.
    # If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value, in which case the latter is used.
    scrapeTimeout: ""
    # serviceMonitor.operatorMetrics.relabelings -- Prometheus [RelabelConfigs] to apply to samples before scraping
    relabelings: []
    # serviceMonitor.operatorMetrics.metricRelabelings -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
    metricRelabelings: []
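# A commented-out sketch of enabling the ServiceMonitor with a sample relabeling.
# It assumes a prometheus-operator is installed; the `release` label value below is
# illustrative and depends on what your Prometheus selector requires:
# serviceMonitor:
#   enabled: true
#   additionalLabels:
#     release: kube-prometheus-stack   # placeholder, match your Prometheus selector
#   clickhouseMetrics:
#     relabelings:
#       - sourceLabels: [__meta_kubernetes_pod_name]
#         targetLabel: pod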
# configs -- clickhouse operator configs
# @default -- check the `values.yaml` file for the config content (auto-generated from the latest operator release)
configs:
  confdFiles: null
  configdFiles:
    01-clickhouse-01-listen.xml: |
      <clickhouse>
        <listen_host>::</listen_host>
        <listen_host>0.0.0.0</listen_host>
        <listen_try>1</listen_try>
      </clickhouse>
    01-clickhouse-02-logger.xml: |
      <clickhouse>
        <logger>
          <level>debug</level>
          <log>/var/log/clickhouse-server/clickhouse-server.log</log>
          <errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
          <size>1000M</size>
          <count>10</count>
          <console>1</console>
        </logger>
      </clickhouse>
    01-clickhouse-03-query_log.xml: |
      <clickhouse>
        <query_log replace="1">
          <database>system</database>
          <table>query_log</table>
          <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
          <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </query_log>
      </clickhouse>
    01-clickhouse-04-part_log.xml: |
      <clickhouse>
        <part_log replace="1">
          <database>system</database>
          <table>part_log</table>
          <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
          <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </part_log>
      </clickhouse>
    01-clickhouse-05-trace_log.xml: |-
      <clickhouse>
        <trace_log replace="1">
          <database>system</database>
          <table>trace_log</table>
          <engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
          <flush_interval_milliseconds>7500</flush_interval_milliseconds>
        </trace_log>
      </clickhouse>
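  # The files above are provisioned as the common ClickHouse configuration for all
  # operator-managed instances (see clickhouse.configuration.file.path.common below).
  # A commented-out sketch of overriding one of them from your own values file
  # (file name kept, content replaced; the logger level is just an example):
  # configdFiles:
  #   01-clickhouse-02-logger.xml: |
  #     <clickhouse>
  #       <logger>
  #         <level>information</level>
  #       </logger>
  #     </clickhouse>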
  files:
    config.yaml:
      # IMPORTANT
      # This file is auto-generated
      # Do not edit this file - all changes would be lost
      # Edit the appropriate template in the following folder:
      # deploy/builder/templates-config
      # IMPORTANT
      #
      # Template parameters available:
      # WATCH_NAMESPACES=
      # CH_USERNAME_PLAIN=
      # CH_PASSWORD_PLAIN=
      # CH_CREDENTIALS_SECRET_NAMESPACE=
      # CH_CREDENTIALS_SECRET_NAME=clickhouse-operator
      # VERBOSITY=1

      ################################################
      ##
      ## Watch section
      ##
      ################################################
      watch:
        # List of namespaces where clickhouse-operator watches for events.
        # Concurrently running operators should watch on different namespaces.
        # IMPORTANT
        # Regexp is applicable.
        namespaces: []
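        # A commented-out sketch of watching only selected namespaces
        # (namespace names below are placeholders; regexps are accepted):
        # namespaces: ["prod-clickhouse", "team-.*"]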
      clickhouse:
        configuration:
          ################################################
          ##
          ## Configuration files section
          ##
          ################################################
          file:
            # Each 'path' can be either absolute or relative.
            # In case path is absolute - it is used as is.
            # In case path is relative - it is relative to the folder where the configuration file you are reading right now is located.
            path:
              # Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
              common: chi/config.d
              # Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
              host: chi/conf.d
              # Path to the folder where ClickHouse configuration files with users' settings are located.
              # Files are common for all instances within a CHI.
              user: chi/users.d
          ################################################
          ##
          ## Configuration users section
          ##
          ################################################
          user:
            # Default settings for user accounts created by the operator.
            # IMPORTANT. These are not access credentials or settings for the 'default' user account,
            # it is a template for filling out missing fields for all user accounts to be created by the operator,
            # with the following EXCEPTIONS:
            # 1. The 'default' user account DOES NOT use the provided password, but uses all the rest of the fields.
            #    Password for the 'default' user account has to be provided explicitly, if it is to be used.
            # 2. The CHOP user account DOES NOT use:
            #    - profile setting. It uses a predefined profile called 'clickhouse_operator'.
            #    - quota setting. It uses an empty quota name.
            #    - networks IP setting. The operator specifies the 'networks/ip' user setting to match the operator's pod IP only.
            #    - password setting. The password for the CHOP account is taken from the 'clickhouse.access.*' section.
            default:
              # Default values for ClickHouse user account(s) created by the operator:
              # 1. user/profile - string
              # 2. user/quota - string
              # 3. user/networks/ip - multiple strings
              # 4. user/password - string
              # These values can be overwritten on a per-user basis.
              profile: "default"
              quota: "default"
              networksIP:
                - "::1"
                - "127.0.0.1"
              password: "default"
          ################################################
          ##
          ## Configuration network section
          ##
          ################################################
          network:
            # Default host_regexp to limit network connectivity from outside
            hostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
        ################################################
        ##
        ## Configuration restart policy section
        ## Configuration restart policy describes what configuration changes require a ClickHouse restart
        ##
        ################################################
        configurationRestartPolicy:
          rules:
            # IMPORTANT!
            # The special version "*" - the default version - has to satisfy all ClickHouse versions.
            # The default version will also be used in case the ClickHouse version is unknown.
            # The ClickHouse version may be unknown due to the host being down - for example, because of an incorrect "settings" section.
            # ClickHouse is not willing to start in case incorrect/unknown settings are provided in the config file.
            - version: "*"
              rules:
                # see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
                # to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
                - settings/*: "yes"
                # single values
                - settings/access_control_path: "no"
                - settings/dictionaries_config: "no"
                - settings/max_server_memory_*: "no"
                - settings/max_*_to_drop: "no"
                - settings/max_concurrent_queries: "no"
                - settings/models_config: "no"
                - settings/user_defined_executable_functions_config: "no"
                # structured XML
                - settings/logger/*: "no"
                - settings/macros/*: "no"
                - settings/remote_servers/*: "no"
                - settings/user_directories/*: "no"
                # these settings should not lead to pod restarts
                - settings/display_secrets_in_show_and_select: "no"
                - zookeeper/*: "no"
                - files/*.xml: "yes"
                - files/config.d/*.xml: "yes"
                - files/config.d/*dict*.xml: "no"
                - files/config.d/*no_restart*: "no"
                # exceptions in default profile
                - profiles/default/background_*_pool_size: "yes"
                - profiles/default/max_*_for_server: "yes"
            - version: "21.*"
              rules:
                - settings/logger: "yes"
        ################################################
        ##
        ## Access to ClickHouse instances
        ##
        ################################################
        access:
          # Possible values for 'scheme' are:
          # 1. http - force http to be used to connect to ClickHouse instances
          # 2. https - force https to be used to connect to ClickHouse instances
          # 3. auto - either http or https is selected based on open ports
          scheme: "auto"
          # ClickHouse credentials (username, password and port) to be used by the operator to connect to ClickHouse instances.
          # These credentials are used for:
          # 1. Metrics requests
          # 2. Schema maintenance
          # A user with these credentials can be specified in additional ClickHouse .xml config files,
          # located in the 'clickhouse.configuration.file.path.user' folder.
          username: ""
          password: ""
          rootCA: ""
          # Location of the k8s Secret with the username and password to be used by the operator to connect to ClickHouse instances.
          # Can be used instead of the explicitly specified username and password available in sections:
          # - clickhouse.access.username
          # - clickhouse.access.password
          # The Secret should have two keys:
          # 1. username
          # 2. password
          secret:
            # Empty `namespace` means that the k8s Secret would be looked for in the same namespace where the operator's pod is running.
            namespace: ""
            # Empty `name` means no k8s Secret would be looked for.
            name: '{{ include "altinity-clickhouse-operator.fullname" . }}'
          # Port to connect to ClickHouse instances on
          port: 8123
          # Timeouts used to limit connections and queries from the operator to ClickHouse instances.
          # Specified in seconds.
          timeouts:
            # Timeout to set up a connection from the operator to a ClickHouse instance. In seconds.
            connect: 1
            # Timeout to perform an SQL query from the operator to a ClickHouse instance. In seconds.
            query: 4
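          # A commented-out sketch of pointing the operator at an existing Secret instead of
          # plain-text credentials (the namespace and name below are placeholders):
          # secret:
          #   namespace: "kube-system"
          #   name: "my-clickhouse-operator-credentials"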
        ################################################
        ##
        ## Addons specifies additional configuration sections
        ## Should it be called something like "templates"?
        ##
        ################################################
        addons:
          rules:
            - version: "*"
              spec:
                configuration:
                  users:
                  profiles:
                  quotas:
                  settings:
                  files:
            - version: ">= 23.3"
              spec:
                configuration:
                  ###
                  ### users.d is global while the description depends on the CH version, which may vary on a per-host basis.
                  ### In case of global-ness this may be better to implement via auto-templates.
                  ###
                  ### As a solution, this may be applied on the whole cluster based on any of its hosts.
                  ###
                  ### What to do when a host is just created? The CH version is not known prior to CH being started,
                  ### and user config is required before CH is started.
                  ### We do not have any info about the cluster on initial creation.
                  ###
                  users:
                    "{clickhouseOperatorUser}/access_management": 1
                    "{clickhouseOperatorUser}/named_collection_control": 1
                    "{clickhouseOperatorUser}/show_named_collections": 1
                    "{clickhouseOperatorUser}/show_named_collections_secrets": 1
                  profiles:
                  quotas:
                  settings:
                  files:
            - version: ">= 23.5"
              spec:
                configuration:
                  users:
                  profiles:
                    clickhouse_operator/format_display_secrets_in_show_and_select: 1
                  quotas:
                  settings:
                    ##
                    ## this may be added on a per-host basis into the host's conf.d folder
                    ##
                    display_secrets_in_show_and_select: 1
                  files:
        ################################################
        ##
        ## Metrics collection
        ##
        ################################################
        metrics:
          # Timeouts used to limit connections and queries from the metrics exporter to ClickHouse instances.
          # Specified in seconds.
          timeouts:
            # Timeout used to limit a metrics collection request. In seconds.
            # Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
            # All collected metrics are returned.
            collect: 9
      keeper:
        configuration:
          ################################################
          ##
          ## Configuration files section
          ##
          ################################################
          file:
            # Each 'path' can be either absolute or relative.
            # In case path is absolute - it is used as is.
            # In case path is relative - it is relative to the folder where the configuration file you are reading right now is located.
            path:
              # Path to the folder where Keeper configuration files common for all instances within a CHK are located.
              common: chk/keeper_config.d
              # Path to the folder where Keeper configuration files unique for each instance (host) within a CHK are located.
              host: chk/conf.d
              # Path to the folder where Keeper configuration files with users' settings are located.
              # Files are common for all instances within a CHK.
              user: chk/users.d
      ################################################
      ##
      ## Template(s) management section
      ##
      ################################################
      template:
        chi:
          # CHI template updates handling policy
          # Possible policy values:
          # - ReadOnStart. Accept CHIT updates on the operator's start only.
          # - ApplyOnNextReconcile. Accept CHIT updates at all times. Apply new CHITs on the next regular reconcile of the CHI.
          policy: ApplyOnNextReconcile
          # Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
          # Templates are added to the list of all templates and used when a CHI is reconciled.
          # Templates are applied in sorted alpha-numeric order.
          path: chi/templates.d
        chk:
          # CHK template updates handling policy
          # Possible policy values:
          # - ReadOnStart. Accept CHIT updates on the operator's start only.
          # - ApplyOnNextReconcile. Accept CHIT updates at all times. Apply new CHITs on the next regular reconcile of the CHK.
          policy: ApplyOnNextReconcile
          # Path to the folder where ClickHouseKeeperInstallation templates .yaml manifests are located.
          # Templates are added to the list of all templates and used when a CHK is reconciled.
          # Templates are applied in sorted alpha-numeric order.
          path: chk/templates.d
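      # For reference, a CHI opts into a template from templates.d via 'useTemplates'.
      # A commented-out sketch (the CHI name is a placeholder; the template name matches
      # the packaged example shipped in templatesdFiles further below):
      # apiVersion: "clickhouse.altinity.com/v1"
      # kind: "ClickHouseInstallation"
      # metadata:
      #   name: "sample"
      # spec:
      #   useTemplates:
      #     - name: "01-default-volumeclaimtemplate"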
      ################################################
      ##
      ## Reconcile section
      ##
      ################################################
      reconcile:
        # Reconcile runtime settings
        runtime:
          # Max number of concurrent CHI reconciles in progress
          reconcileCHIsThreadsNumber: 10
          # The operator reconciles shards concurrently in each CHI with the following limitations:
          # 1. The number of shards being reconciled (and thus having hosts down) in each CHI concurrently
          #    can not be greater than 'reconcileShardsThreadsNumber'.
          # 2. The percentage of shards being reconciled (and thus having hosts down) in each CHI concurrently
          #    can not be greater than 'reconcileShardsMaxConcurrencyPercent'.
          # 3. The first shard is always reconciled alone. Concurrency starts from the second shard and onward.
          # Thus the number of shards being reconciled (and thus having hosts down) in each CHI is limited by both number and percentage.

          # Max number of concurrent shard reconciles within one cluster in progress
          reconcileShardsThreadsNumber: 5
          # Max percentage of concurrent shard reconciles within one cluster in progress
          reconcileShardsMaxConcurrencyPercent: 50
        # Reconcile StatefulSet scenario
        statefulSet:
          # Create StatefulSet scenario
          create:
            # What to do in case a created StatefulSet is not in 'Ready' state after `reconcile.statefulSet.update.timeout` seconds
            # Possible options:
            # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
            #    do not try to fix or delete or update it, just abort the reconcile cycle.
            #    Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
            # 2. delete - delete the newly created problematic StatefulSet and follow the 'abort' path afterwards.
            # 3. ignore - ignore the error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
            onFailure: ignore
          # Update StatefulSet scenario
          update:
            # How many seconds to wait for a created/updated StatefulSet to be 'Ready'
            timeout: 300
            # How many seconds to wait between checks/polls for a created/updated StatefulSet status
            pollInterval: 5
            # What to do in case an updated StatefulSet is not in 'Ready' state after `reconcile.statefulSet.update.timeout` seconds
            # Possible options:
            # 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
            #    do not try to fix or delete or update it, just abort the reconcile cycle.
            #    Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
            # 2. rollback - delete the Pod and roll the StatefulSet back to the previous Generation.
            #    The Pod would be recreated by the StatefulSet based on the rolled-back StatefulSet configuration.
            #    Follow the 'abort' path afterwards.
            # 3. ignore - ignore the error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
            onFailure: abort
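        # A worked example of the two shard-concurrency limits above (our reading of the
        # rules, not an authoritative formula): with 20 shards, the defaults
        # reconcileShardsThreadsNumber=5 and reconcileShardsMaxConcurrencyPercent=50
        # allow min(5, 50% of 20 = 10) = 5 shards to be reconciled at once,
        # after the first shard has been reconciled alone.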
        # Reconcile Host scenario
        host:
          # During the reconcile procedure the operator waits for a ClickHouse host to reach the following conditions:
          wait:
            # Whether the operator during the reconcile procedure should wait for a ClickHouse host:
            # - to be excluded from a ClickHouse cluster
            # - to complete all running queries
            # - to be included into a ClickHouse cluster
            # respectively, before moving forward with the host reconcile
            exclude: true
            queries: true
            include: false
            # The operator during the reconcile procedure should wait for replicas to catch up on
            # replication delay (a.k.a. replication lag) for the following replicas:
            replicas:
              # All replicas (new and known earlier) are explicitly requested to wait for replication to catch up
              all: no
              # Only new replicas are requested to wait for replication to catch up
              new: yes
              # Replication catch-up is considered to be completed as soon as the replication delay
              # (a.k.a. replication lag) - calculated as "MAX(absolute_delay) FROM system.replicas" -
              # is within this specified delay (in seconds)
              delay: 10
            probes:
              # Whether the operator during the host launch procedure should wait for the startup probe to succeed.
              # In case the probe is unspecified the wait is assumed to be completed successfully.
              # Default is to not wait.
              startup: no
              # Whether the operator during the host launch procedure should wait for the readiness probe to succeed.
              # In case the probe is unspecified the wait is assumed to be completed successfully.
              # Default is to wait.
              readiness: yes
          # The operator during the reconcile procedure should drop the following entities:
          drop:
            replicas:
              # Whether the operator during the reconcile procedure should drop replicas when a replica is deleted
              onDelete: yes
              # Whether the operator during the reconcile procedure should drop replicas when a replica volume is lost
              onLostVolume: yes
              # Whether the operator during the reconcile procedure should drop active replicas when a replica is deleted or recreated
              active: no
      ################################################
      ##
      ## Annotations management section
      ##
      ################################################
      annotation:
        # Applied when:
        # 1. Propagating annotations from the CHI's `metadata.annotations` to child objects' `metadata.annotations`,
        # 2. Propagating annotations from the CHI Template's `metadata.annotations` to the CHI's `metadata.annotations`.
        # Include annotations from the following list:
        # Applied only when not empty. Empty list means "include all, no selection".
        include: []
        # Exclude annotations from the following list:
        exclude: []
      ################################################
      ##
      ## Labels management section
      ##
      ################################################
      label:
        # Applied when:
        # 1. Propagating labels from the CHI's `metadata.labels` to child objects' `metadata.labels`,
        # 2. Propagating labels from the CHI Template's `metadata.labels` to the CHI's `metadata.labels`.
        # Include labels from the following list:
        # Applied only when not empty. Empty list means "include all, no selection".
        include: []
        # Exclude labels from the following list:
        # Applied only when not empty. Empty list means "nothing to exclude, no selection".
        exclude: []
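        # A commented-out sketch: propagate only a team label and drop a noisy one
        # (the label keys below are placeholders for illustration):
        # include: ["team"]
        # exclude: ["deploy-timestamp"]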
        # Whether to append *Scope* labels to StatefulSet and Pod.
        # For the full list of available *scope* labels check 'labeler.go':
        # LabelShardScopeIndex
        # LabelReplicaScopeIndex
        # LabelCHIScopeIndex
        # LabelCHIScopeCycleSize
        # LabelCHIScopeCycleIndex
        # LabelCHIScopeCycleOffset
        # LabelClusterScopeIndex
        # LabelClusterScopeCycleSize
        # LabelClusterScopeCycleIndex
        # LabelClusterScopeCycleOffset
        appendScope: "no"
      ################################################
      ##
      ## Metrics management section
      ##
      ################################################
      metrics:
        labels:
          exclude: []
      ################################################
      ##
      ## Status management section
      ##
      ################################################
      status:
        fields:
          action: false
          actions: false
          error: true
          errors: true
      ################################################
      ##
      ## StatefulSet management section
      ##
      ################################################
      statefulSet:
        revisionHistoryLimit: 0
      ################################################
      ##
      ## Pod management section
      ##
      ################################################
      pod:
        # Grace period for Pod termination.
        # How many seconds to wait between sending
        # SIGTERM and SIGKILL during the Pod termination process.
        # Increase this number in case of slow shutdown.
        terminationGracePeriod: 30
      ################################################
      ##
      ## Log parameters section
      ##
      ################################################
      logger:
        logtostderr: "true"
        alsologtostderr: "false"
        v: "1"
        stderrthreshold: ""
        vmodule: ""
        log_backtrace_at: ""
  templatesdFiles:
    001-templates.json.example: |
      {
        "apiVersion": "clickhouse.altinity.com/v1",
        "kind": "ClickHouseInstallationTemplate",
        "metadata": {
          "name": "01-default-volumeclaimtemplate"
        },
        "spec": {
          "templates": {
            "volumeClaimTemplates": [
              {
                "name": "chi-default-volume-claim-template",
                "spec": {
                  "accessModes": [
                    "ReadWriteOnce"
                  ],
                  "resources": {
                    "requests": {
                      "storage": "2Gi"
                    }
                  }
                }
              }
            ],
            "podTemplates": [
              {
                "name": "chi-default-oneperhost-pod-template",
                "distribution": "OnePerHost",
                "spec": {
                  "containers": [
                    {
                      "name": "clickhouse",
                      "image": "clickhouse/clickhouse-server:23.8",
                      "ports": [
                        {
                          "name": "http",
                          "containerPort": 8123
                        },
                        {
                          "name": "client",
                          "containerPort": 9000
                        },
                        {
                          "name": "interserver",
                          "containerPort": 9009
                        }
                      ]
                    }
                  ]
                }
              }
            ]
          }
        }
      }
    default-pod-template.yaml.example: |
      apiVersion: "clickhouse.altinity.com/v1"
      kind: "ClickHouseInstallationTemplate"
      metadata:
        name: "default-oneperhost-pod-template"
      spec:
        templates:
          podTemplates:
            - name: default-oneperhost-pod-template
              distribution: "OnePerHost"
    default-storage-template.yaml.example: |
      apiVersion: "clickhouse.altinity.com/v1"
      kind: "ClickHouseInstallationTemplate"
      metadata:
        name: "default-storage-template-2Gi"
      spec:
        templates:
          volumeClaimTemplates:
            - name: default-storage-template-2Gi
              spec:
                accessModes:
                  - ReadWriteOnce
                resources:
                  requests:
                    storage: 2Gi
    readme: |-
      Templates in this folder are packaged with the operator and are available via 'useTemplate'
  usersdFiles:
    01-clickhouse-operator-profile.xml: |
      <clickhouse>
        <profiles>
          <clickhouse_operator>
            <log_queries>0</log_queries>
            <skip_unavailable_shards>1</skip_unavailable_shards>
            <http_connection_timeout>10</http_connection_timeout>
            <os_thread_priority>0</os_thread_priority>
            <allow_experimental_analyzer>0</allow_experimental_analyzer>
          </clickhouse_operator>
        </profiles>
      </clickhouse>
    02-clickhouse-default-profile.xml: |-
      <clickhouse>
        <profiles>
          <default>
            <insert_quorum>2</insert_quorum>
            <insert_quorum_parallel>1</insert_quorum_parallel>
            <connect_timeout_with_failover_ms>1000</connect_timeout_with_failover_ms>
            <distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
            <do_not_merge_across_partitions_select_final>1</do_not_merge_across_partitions_select_final>
            <fsync_metadata>1</fsync_metadata>
            <load_balancing>nearest_hostname</load_balancing>
            <prefer_localhost_replica>0</prefer_localhost_replica>
          </default>
        </profiles>
      </clickhouse>
  keeperConfdFiles: null
  keeperConfigdFiles:
    01-keeper-01-default-config.xml: |
      <clickhouse>
        <keeper_server>
          <coordination_settings>
            <min_session_timeout_ms>10000</min_session_timeout_ms>
            <operation_timeout_ms>10000</operation_timeout_ms>
            <raft_logs_level>information</raft_logs_level>
            <session_timeout_ms>100000</session_timeout_ms>
          </coordination_settings>
          <hostname_checks_enabled>true</hostname_checks_enabled>
          <log_storage_path>/var/lib/clickhouse-keeper/coordination/logs</log_storage_path>
          <snapshot_storage_path>/var/lib/clickhouse-keeper/coordination/snapshots</snapshot_storage_path>
          <storage_path>/var/lib/clickhouse-keeper</storage_path>
          <tcp_port>2181</tcp_port>
        </keeper_server>
        <listen_host>::</listen_host>
        <listen_host>0.0.0.0</listen_host>
        <listen_try>1</listen_try>
        <logger>
          <console>1</console>
          <level>information</level>
        </logger>
        <max_connections>4096</max_connections>
      </clickhouse>
    01-keeper-02-readiness.xml: |
      <clickhouse>
        <http_control>
          <port>9182</port>
          <readiness>
            <endpoint>/ready</endpoint>
          </readiness>
        </http_control>
      </clickhouse>
    01-keeper-03-enable-reconfig.xml: |-
      <clickhouse>
        <keeper_server>
          <enable_reconfiguration>false</enable_reconfiguration>
        </keeper_server>
      </clickhouse>
  keeperTemplatesdFiles:
    readme: |-
      Templates in this folder are packaged with the operator and are available via 'useTemplate'
  keeperUsersdFiles: null
# additionalResources -- list of additional resources to create (processed via the `tpl` function),
# useful for creating ClickHouse clusters together with clickhouse-operator,
# check `kubectl explain chi` for details
additionalResources: []
# - |
#   apiVersion: v1
#   kind: ConfigMap
#   metadata:
#     name: {{ include "altinity-clickhouse-operator.fullname" . }}-cm
#     namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
# - |
#   apiVersion: v1
#   kind: Secret
#   metadata:
#     name: {{ include "altinity-clickhouse-operator.fullname" . }}-s
#     namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
#   stringData:
#     mykey: my-value
# - |
#   apiVersion: clickhouse.altinity.com/v1
#   kind: ClickHouseInstallation
#   metadata:
#     name: {{ include "altinity-clickhouse-operator.fullname" . }}-chi
#     namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
#   spec:
#     configuration:
#       clusters:
#         - name: default
#           layout:
#             shardsCount: 1
dashboards:
  # dashboards.enabled -- provision grafana dashboards as configMaps (can be synced by the grafana dashboards sidecar, see https://github.com/grafana/helm-charts/blob/grafana-8.3.4/charts/grafana/values.yaml#L778)
  enabled: false
  # dashboards.additionalLabels -- labels to add to the configMaps with dashboards
  additionalLabels:
    # dashboards.additionalLabels.grafana_dashboard -- watched by the official grafana helm chart when sidecar.dashboards.enabled=true
    grafana_dashboard: "1"
  # dashboards.annotations -- annotations to add to the configMaps with dashboards
  annotations:
    # dashboards.annotations.grafana_folder -- folder where dashboards will be placed; requires setting sidecar.dashboards.folderAnnotation=grafana_folder in the official grafana helm chart
    grafana_folder: clickhouse-operator
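# A commented-out sketch of the matching values on the official grafana chart side,
# shown here only for orientation (keys are from the grafana chart referenced above):
# grafana:
#   sidecar:
#     dashboards:
#       enabled: true
#       label: grafana_dashboard
#       folderAnnotation: grafana_folder
#       provider:
#         foldersFromFilesStructure: true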