From 0965859dec1e059f3e0c8d88c1c1ef8e742717f5 Mon Sep 17 00:00:00 2001 From: vijeyash Date: Tue, 21 Nov 2023 15:33:22 +0530 Subject: [PATCH 1/6] pvc changes --- agent/kubviz/kubePreUpgrade.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/agent/kubviz/kubePreUpgrade.go b/agent/kubviz/kubePreUpgrade.go index 11ec310f..20da065f 100644 --- a/agent/kubviz/kubePreUpgrade.go +++ b/agent/kubviz/kubePreUpgrade.go @@ -81,7 +81,7 @@ func publishK8sDepricated_Deleted_Api(result *model.Result, js nats.JetStreamCon func KubePreUpgradeDetector(config *rest.Config, js nats.JetStreamContext) error { pvcMountPath := "/mnt/agent/kbz" uniqueDir := fmt.Sprintf("%s/kubepug", pvcMountPath) - err := os.Mkdir(uniqueDir, 0755) + err := os.MkdirAll(uniqueDir, 0755) if err != nil { return err } From 030407fd4362494319d87e2eaafc6b0791281e08 Mon Sep 17 00:00:00 2001 From: vijeyash Date: Tue, 28 Nov 2023 16:42:09 +0530 Subject: [PATCH 2/6] changed image to get shell access --- dockerfiles/client/Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/dockerfiles/client/Dockerfile b/dockerfiles/client/Dockerfile index 31af58c8..fd9f5b18 100644 --- a/dockerfiles/client/Dockerfile +++ b/dockerfiles/client/Dockerfile @@ -11,7 +11,7 @@ RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 GO111MODULE=on go build -a -o k8smetri # Use distroless as minimal base image to package the manager binary # Refer to https://github.com/GoogleContainerTools/distroless for more details -FROM gcr.io/distroless/static:nonroot +FROM golang:alpine WORKDIR / COPY --from=builder /workspace/k8smetrics_client . USER 65532:65532 From 482c19c9c73bafc638861f666c8e5e49f9ee6cfe Mon Sep 17 00:00:00 2001 From: Akash LM Date: Mon, 4 Dec 2023 17:35:01 +0530 Subject: [PATCH 3/6] updated Clickhouse chart --- .github/workflows/helm_release.yml | 1 + charts/clickhouse/.helmignore | 0 charts/clickhouse/Chart.yaml | 29 +- charts/clickhouse/README.md | 652 ++++++-- charts/clickhouse/templates/NOTES.txt | 89 +- charts/clickhouse/templates/_helpers.tpl | 221 ++- .../templates/configmap-config.yaml | 112 -- .../clickhouse/templates/configmap-extra.yaml | 20 + .../templates/configmap-metrika.yaml | 77 - .../templates/configmap-users-extra.yaml | 20 + .../clickhouse/templates/configmap-users.yaml | 68 - charts/clickhouse/templates/configmap.yaml | 20 + .../templates/deployment-tabix.yaml | 85 - charts/clickhouse/templates/extra-list.yaml | 9 + .../templates/ingress-clickhouse.yaml | 27 - .../clickhouse/templates/ingress-tabix.yaml | 29 - .../templates/ingress-tls-secrets.yaml | 44 + charts/clickhouse/templates/ingress.yaml | 59 + .../templates/init-scripts-secret.yaml | 19 + .../clickhouse/templates/prometheusrule.yaml | 24 + .../templates/scripts-configmap.yaml | 34 + .../clickhouse/templates/service-account.yaml | 19 + .../templates/service-external-access.yaml | 155 ++ .../templates/service-headless.yaml | 69 + charts/clickhouse/templates/service.yaml | 152 ++ .../clickhouse/templates/servicemonitor.yaml | 47 + .../templates/start-scripts-secret.yaml | 19 + .../statefulset-clickhouse-replica.yaml | 184 --- .../templates/statefulset-clickhouse.yaml | 182 --- charts/clickhouse/templates/statefulset.yaml | 425 +++++ .../templates/svc-clickhouse-headless.yaml | 26 - .../svc-clickhouse-replica-headless.yaml | 26 - .../templates/svc-clickhouse-replica.yaml | 25 - .../clickhouse/templates/svc-clickhouse.yaml | 25 - charts/clickhouse/templates/svc-tabix.yaml | 19 - charts/clickhouse/templates/tls-secret.yaml | 29 + 
charts/clickhouse/values.yaml | 1455 +++++++++++++---- 37 files changed, 3045 insertions(+), 1451 deletions(-) mode change 100755 => 100644 charts/clickhouse/.helmignore mode change 100755 => 100644 charts/clickhouse/Chart.yaml mode change 100755 => 100644 charts/clickhouse/README.md mode change 100755 => 100644 charts/clickhouse/templates/NOTES.txt mode change 100755 => 100644 charts/clickhouse/templates/_helpers.tpl delete mode 100755 charts/clickhouse/templates/configmap-config.yaml create mode 100644 charts/clickhouse/templates/configmap-extra.yaml delete mode 100755 charts/clickhouse/templates/configmap-metrika.yaml create mode 100644 charts/clickhouse/templates/configmap-users-extra.yaml delete mode 100755 charts/clickhouse/templates/configmap-users.yaml create mode 100644 charts/clickhouse/templates/configmap.yaml delete mode 100755 charts/clickhouse/templates/deployment-tabix.yaml create mode 100644 charts/clickhouse/templates/extra-list.yaml delete mode 100755 charts/clickhouse/templates/ingress-clickhouse.yaml delete mode 100755 charts/clickhouse/templates/ingress-tabix.yaml create mode 100644 charts/clickhouse/templates/ingress-tls-secrets.yaml create mode 100644 charts/clickhouse/templates/ingress.yaml create mode 100644 charts/clickhouse/templates/init-scripts-secret.yaml create mode 100644 charts/clickhouse/templates/prometheusrule.yaml create mode 100644 charts/clickhouse/templates/scripts-configmap.yaml create mode 100644 charts/clickhouse/templates/service-account.yaml create mode 100644 charts/clickhouse/templates/service-external-access.yaml create mode 100644 charts/clickhouse/templates/service-headless.yaml create mode 100644 charts/clickhouse/templates/service.yaml create mode 100644 charts/clickhouse/templates/servicemonitor.yaml create mode 100644 charts/clickhouse/templates/start-scripts-secret.yaml delete mode 100755 charts/clickhouse/templates/statefulset-clickhouse-replica.yaml delete mode 100755 charts/clickhouse/templates/statefulset-clickhouse.yaml create mode 100644 charts/clickhouse/templates/statefulset.yaml delete mode 100755 charts/clickhouse/templates/svc-clickhouse-headless.yaml delete mode 100755 charts/clickhouse/templates/svc-clickhouse-replica-headless.yaml delete mode 100755 charts/clickhouse/templates/svc-clickhouse-replica.yaml delete mode 100755 charts/clickhouse/templates/svc-clickhouse.yaml delete mode 100755 charts/clickhouse/templates/svc-tabix.yaml create mode 100644 charts/clickhouse/templates/tls-secret.yaml mode change 100755 => 100644 charts/clickhouse/values.yaml diff --git a/.github/workflows/helm_release.yml b/.github/workflows/helm_release.yml index 597ddaf8..f16b4250 100644 --- a/.github/workflows/helm_release.yml +++ b/.github/workflows/helm_release.yml @@ -23,6 +23,7 @@ jobs: - name: Add Helm repos run: | helm repo add tools https://kube-tarian.github.io/helmrepo-supporting-tools + helm repo add bitnami https://charts.bitnami.com/bitnami - name: Run chart-releaser uses: helm/chart-releaser-action@v1.1.0 diff --git a/charts/clickhouse/.helmignore b/charts/clickhouse/.helmignore old mode 100755 new mode 100644 diff --git a/charts/clickhouse/Chart.yaml b/charts/clickhouse/Chart.yaml old mode 100755 new mode 100644 index e6ff5829..83ead92e --- a/charts/clickhouse/Chart.yaml +++ b/charts/clickhouse/Chart.yaml @@ -1,16 +1,23 @@ -appVersion: "19.14" -description: ClickHouse is an open source column-oriented database management system - capable of real time generation of analytical data reports using SQL queries -home: 
https://clickhouse.yandex/ -icon: https://clickhouse.yandex/images/logo.png +apiVersion: v2 +appVersion: 23.10.5 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 2.x.x +description: ClickHouse is an open-source column-oriented OLAP database management + system. Use it to boost your database performance while providing linear scalability + and hardware efficiency. +home: https://bitnami.com +icon: https://bitnami.com/assets/stacks/clickhouse/img/clickhouse-stack-220x234.png keywords: -- clickhouse -- olap - database +- sharding maintainers: -- email: 411934049@qq.com - name: liwenhe +- name: VMware, Inc. + url: https://github.com/bitnami/charts name: clickhouse sources: -- https://github.com/liwenhe1993/charts -version: 1.0.2 +- https://github.com/bitnami/charts/tree/main/bitnami/clickhouse +version: 1.0.3 diff --git a/charts/clickhouse/README.md b/charts/clickhouse/README.md old mode 100755 new mode 100644 index ec22a59f..ef654f7e --- a/charts/clickhouse/README.md +++ b/charts/clickhouse/README.md @@ -1,169 +1,529 @@ -# ClickHouse + -[ClickHouse](https://clickhouse.yandex/) is an open source column-oriented database management system capable of real time generation of analytical data reports using SQL queries. +# Bitnami package for ClickHouse + +ClickHouse is an open-source column-oriented OLAP database management system. Use it to boost your database performance while providing linear scalability and hardware efficiency. + +[Overview of ClickHouse](https://clickhouse.com/) + +Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/clickhouse +``` + +Looking to use ClickHouse in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. ## Introduction -This chart bootstraps a [ClickHouse](https://clickhouse.yandex/) replication cluster deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts for Helm are carefully engineered, actively maintained and are the quickest and easiest way to deploy containers on a Kubernetes cluster that are ready to handle production workloads. + +This chart bootstraps a [ClickHouse](https://github.com/clickhouse/clickhouse) Deployment in a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +[Learn more about the default configuration of the chart](https://docs.bitnami.com/kubernetes/infrastructure/clickhouse/get-started/). ## Prerequisites -- Kubernetes 1.10+ +- Kubernetes 1.23+ +- Helm 3.8.0+ - PV provisioner support in the underlying infrastructure +- ReadWriteMany volumes for deployment scaling + +> If you are using Kubernetes 1.18, the following code needs to be commented out. 
+> seccompProfile: +> type: "RuntimeDefault" ## Installing the Chart To install the chart with the release name `my-release`: -```bash -$ helm repo add liwenhe https://liwenhe1993.github.io/charts/ -$ helm repo update -$ helm install --name clickhouse liwenhe/clickhouse +```console +helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/clickhouse ``` -These commands deploy Clickhouse on the Kubernetes cluster in the default configuration. The [configuration](#configuration) section lists the parameters that can be configured during installation. + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The command deploys ClickHouse on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. > **Tip**: List all releases using `helm list` ## Uninstalling the Chart -To uninstall/delete the `clickhouse` deployment: +To uninstall/delete the `my-release` deployment: -```bash -$ helm delete --purge clickhouse +```console +helm delete my-release ``` The command removes all the Kubernetes components associated with the chart and deletes the release. -## Configuration - -The following tables lists the configurable parameters of the Clickhouse chart and their default values. - -| Parameter | Description | Default | -| --------------------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------ | ----------------------------------------------------- | -| `timezone` | World time and date for cities in all time zones | `Asia/Shanghai` | -| `clusterDomain` | Kubernetes cluster domain | `cluster.local` | -| `affinity` | Clickhouse Node selectors and tolerations for pod assignment | `nil` | -| `clickhouse.podManagementPolicy` | StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees | `Parallel` | -| `clickhouse.updateStrategy` | StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete | `RollingUpdate` | -| `clickhouse.rollingUpdatePartition` | Partition update strategy | `nil` | -| `clickhouse.path` | The path to the directory containing data | `/var/lib/clickhouse` | -| `clickhouse.http_port` | The port for connecting to the server over HTTP | `8123` | -| `clickhouse.tcp_port` | Port for communicating with clients over the TCP protocol | `9000` | -| `clickhouse.interserver_http_port` | Port for exchanging data between ClickHouse servers | `9009` | -| `clickhouse.replicas` | The instance number of Clickhouse | `3` | -| `clickhouse.image` | Docker image for Clickhouse | `yandex/clickhouse-server` | -| `clickhouse.imageVersion` | Docker image version for Clickhouse | `19.14` | -| `clickhouse.imagePullPolicy` | Image pull policy. 
One of Always, Never, IfNotPresent | `IfNotPresent` | -| `clickhouse.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `clickhouse.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `clickhouse.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `clickhouse.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `clickhouse.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `clickhouse.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `clickhouse.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `clickhouse.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `clickhouse.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `clickhouse.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `clickhouse.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `clickhouse.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `clickhouse.persistentVolumeClaim.enabled` | Enable persistence using a `PersistentVolumeClaim` | `false` | -| `clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled` | Turn on and off dataPersistentVolume | `false` | -| `clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | -| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` | -| `clickhouse.persistentVolumeClaim.dataPersistentVolume.storage` | Persistent Volume Size | `500Gi` | -| `clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled` | Turn on and off dataPersistentVolume | `false` | -| `clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | -| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName` | Persistent Volume Storage Class | `` | -| `clickhouse.persistentVolumeClaim.logsPersistentVolume.storage` | Persistent Volume Size | `50Gi` | -| `clickhouse.ingress.enabled` | Enable ingress | `false` | -| `clickhouse.ingress.host` | Ingress host | `` | -| `clickhouse.ingress.path` | Ingress path | `` | -| `clickhouse.ingress.tls.enabled` | Enable ingress tls | `false` | -| `clickhouse.ingress.tls.hosts` | Ingress tls hosts | `[]` | -| `clickhouse.ingress.tls.secretName` | Ingress tls `secretName` | `` | -| `clickhouse.configmap.enabled` | If Configmap's enabled is `true`, Custom `config.xml`, `metrica.xml` and `users.xml` | `true` | -| `clickhouse.configmap.max_connections` | The maximum number of inbound connections | `4096` | -| `clickhouse.configmap.keep_alive_timeout` | The number of seconds that ClickHouse waits for incoming requests before closing the connection | `3` | -| `clickhouse.configmap.max_concurrent_queries` | The maximum number of simultaneously processed requests | `100` | -| `clickhouse.configmap.uncompressed_cache_size` | Cache size (in bytes) for uncompressed data used by table engines from the MergeTree | `8589934592` | -| `clickhouse.configmap.mark_cache_size` | Approximate size (in bytes) of the cache of "marks" used by MergeTree | `5368709120` | -| `clickhouse.configmap.umask` | Number is always parsed as octal. 
Default umask is 027 (other users cannot read logs, data files, etc; group can only read) | `022` | -| `clickhouse.configmap.mlock_executable` | Enabling this option is recommended but will lead to increased startup time for up to a few seconds | `false` | -| `clickhouse.configmap.builtin_dictionaries_reload_interval` | The interval in seconds before reloading built-in dictionaries | `3600` | -| `clickhouse.configmap.max_session_timeout` | Maximum session timeout, in seconds | `3600` | -| `clickhouse.configmap.default_session_timeout` | Default session timeout, in seconds | `60` | -| `clickhouse.configmap.disable_internal_dns_cache` | Uncomment to disable ClickHouse internal DNS caching | `1` | -| `clickhouse.configmap.max_open_files` | The maximum number of open files | `` | -| `clickhouse.configmap.interserver_http_host` | The host name that can be used by other servers to access this server | `` | -| `clickhouse.configmap.logger.path` | The log file path | `/var/log/clickhouse-server` | -| `clickhouse.configmap.logger.level` | Logging level. Acceptable values: trace, debug, information, warning, error | `trace` | -| `clickhouse.configmap.logger.size` | Size of the file | `1000M` | -| `clickhouse.configmap.logger.count` | The number of archived log files that ClickHouse stores | `10` | -| `clickhouse.configmap.compression.enabled` | Enable data compression settings | `false` | -| `clickhouse.configmap.compression.cases[].min_part_size` | The minimum size of a table part | `10000000000` | -| `clickhouse.configmap.compression.cases[].min_part_size_ratio` | The ratio of the minimum size of a table part to the full size of the table | `0.01` | -| `clickhouse.configmap.compression.cases[].method` | Compression method. Acceptable values ​: lz4 or zstd(experimental) | `zstd` | -| `clickhouse.configmap.zookeeper_servers.enabled` | Enable contains settings that allow ClickHouse to interact with a ZooKeeper cluster | `false` | -| `clickhouse.configmap.zookeeper_servers.session_timeout_ms` | Maximum timeout for the client session in milliseconds | `30000` | -| `clickhouse.configmap.zookeeper_servers.operation_timeout_ms` | Operation timeout for the client session in milliseconds | `10000` | -| `clickhouse.configmap.zookeeper_servers.root` | The znode that is used as the root for znodes used by the ClickHouse server. Optional | `` | -| `clickhouse.configmap.zookeeper_servers.identity` | User and password, that can be required by ZooKeeper to give access to requested znodes. Optional | `` | -| `clickhouse.configmap.zookeeper_servers.config[].index` | ZooKeeper index | `` | -| `clickhouse.configmap.zookeeper_servers.config[].host` | ZooKeeper host | `` | -| `clickhouse.configmap.zookeeper_servers.config[].port` | ZooKeeper port | `` | -| `clickhouse.configmap.remote_servers.enabled` | Enable configuration of clusters used by the Distributed table engine | `true` | -| `clickhouse.configmap.remote_servers.internal_replication` | If this parameter is set to 'true', the table where data will be written is going to replicate them itself | `false` | -| `clickhouse.configmap.remote_servers.replica.user` | Name of the user for connecting to a remote server. Access is configured in the users.xml file. | `default` | -| `clickhouse.configmap.remote_servers.replica.password` | The password for connecting to a remote server (not masked). | `nil` | -| `clickhouse.configmap.remote_servers.replica.compression` | Use data compression. 
| `true` | -| `clickhouse.configmap.remote_servers.replica.backup.enabled` | Enable replica backup | `false` | -| `clickhouse.configmap.remote_servers.graphite.enabled` | Enable graphite | `false` | -| `clickhouse.configmap.remote_servers.graphite.config[].timeout` | The timeout for sending data, in seconds | `0.1` | -| `clickhouse.configmap.remote_servers.graphite.config[].interval` | The interval for sending, in seconds | `60` | -| `clickhouse.configmap.remote_servers.graphite.config[].root_path` | Prefix for keys | `one_min` | -| `clickhouse.configmap.remote_servers.graphite.config[].metrics` | Sending data from a :ref:system_tables-system.metrics table | `true` | -| `clickhouse.configmap.remote_servers.graphite.config[].events` | Sending deltas data accumulated for the time period from a :ref:system_tables-system.events table | `true` | -| `clickhouse.configmap.remote_servers.graphite.config[].events_cumulative` | Sending cumulative data from a :ref:system_tables-system.events table | `true` | -| `clickhouse.configmap.remote_servers.graphite.config[].asynchronous_metrics` | Sending data from a :ref:system_tables-system.asynchronous_metrics table | `true` | -| `clickhouse.configmap.profiles.enabled` | Enable a settings profiles | `false` | -| `clickhouse.configmap.profiles.profile[].name` | Tne name of a settings profile | `` | -| `clickhouse.configmap.profiles.profile[].config` | The config of a settings profile | `{}` | -| `clickhouse.configmap.users.enabled` | Enable a settings users | `false` | -| `clickhouse.configmap.users.user[].name` | Tne name of a settings user | `` | -| `clickhouse.configmap.users.user[].config` | Tne config of a settings user | `{}` | -| `clickhouse.configmap.quotas.enabled` | Enable a settings quotas | `false` | -| `clickhouse.configmap.quotas.quota[].name` | Tne name of a settings quota | `` | -| `clickhouse.configmap.quotas.quota[].config[]` | Tne config of a settings quota | `[]` | -| `tabix.enabled` | Enable tabix | `false` | -| `tabix.replicas` | The instance number of Tabix | `1` | -| `tabix.updateStrategy.type` | Type of deployment. Can be "Recreate" or "RollingUpdate". 
Default is RollingUpdate | `RollingUpdate` | -| `tabix.updateStrategy.maxSurge` | The maximum number of pods that can be scheduled above the desired number of pods | `3` | -| `tabix.updateStrategy.maxUnavailable` | The maximum number of pods that can be unavailable during the update | `1` | -| `tabix.image` | Docker image name | `spoonest/clickhouse-tabix-web-client` | -| `tabix.imageVersion` | Docker image version | `stable` | -| `tabix.imagePullPolicy` | Dcoker image pull policy | `IfNotPresent` | -| `tabix.livenessProbe.enabled` | Turn on and off liveness probe | `true` | -| `tabix.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `30` | -| `tabix.livenessProbe.periodSeconds` | How often to perform the probe | `30` | -| `tabix.livenessProbe.timeoutSeconds` | When the probe times out | `5` | -| `tabix.livenessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `tabix.livenessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `tabix.readinessProbe.enabled` | Turn on and off readiness probe | `true` | -| `tabix.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `30` | -| `tabix.readinessProbe.periodSeconds` | How often to perform the probe | `30` | -| `tabix.readinessProbe.timeoutSeconds` | When the probe times out | `5` | -| `tabix.readinessProbe.failureThreshold` | Minimum consecutive successes for the probe | `3` | -| `tabix.readinessProbe.successThreshold` | Minimum consecutive failures for the probe | `1` | -| `tabix.security.user` | Tabix login username | `admin` | -| `tabix.security.password` | Tabix login password | `admin` | -| `tabix.automaticConnection.chName` | Automatic connection Clickhouse name | `` | -| `tabix.automaticConnection.chHost` | Automatic connection Clickhouse host | `` | -| `tabix.automaticConnection.chLogin` | Automatic connection Clickhouse login username | `` | -| `tabix.automaticConnection.chPassword` | Automatic connection Clickhouse login password | `` | -| `tabix.automaticConnection.chParams` | Automatic connection Clickhouse params | `` | -| `tabix.ingress.enabled` | Enable ingress | `false` | -| `tabix.ingress.host` | Ingress host | `` | -| `tabix.ingress.path` | Ingress path | `` | -| `tabix.ingress.tls.enabled` | Enable ingress tls | `false` | -| `tabix.ingress.tls.hosts` | Ingress tls hosts | `[]` | - -For more information please refer to the [liwenhe1993/charts](https://github.com/liwenhe1993/charts.git) documentation. 
+## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.name | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | + +### ClickHouse Parameters + +| Name | Description | Value | +| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------- | +| `image.registry` | ClickHouse image registry | `REGISTRY_NAME` | +| `image.repository` | ClickHouse image repository | `REPOSITORY_NAME/clickhouse` | +| `image.digest` | ClickHouse image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | ClickHouse image pull policy | `IfNotPresent` | +| `image.pullSecrets` | ClickHouse image pull secrets | `[]` | +| `image.debug` | Enable ClickHouse image debug mode | `false` | +| `shards` | Number of ClickHouse shards to deploy | `2` | +| `replicaCount` | Number of ClickHouse replicas per shard to deploy | `3` | +| `distributeReplicasByZone` | Schedules replicas of the same shard to different availability zones | `false` | +| `containerPorts.http` | ClickHouse HTTP container port | `8123` | +| `containerPorts.https` | ClickHouse HTTPS container port | `8443` | +| `containerPorts.tcp` | ClickHouse TCP container port | `9000` | +| `containerPorts.tcpSecure` | ClickHouse TCP (secure) container port | `9440` | +| `containerPorts.keeper` | ClickHouse keeper TCP container port | `2181` | +| `containerPorts.keeperSecure` | ClickHouse keeper TCP (secure) container port | `3181` | +| `containerPorts.keeperInter` | ClickHouse keeper interserver TCP container port | `9444` | +| `containerPorts.mysql` | ClickHouse MySQL container port | `9004` | +| `containerPorts.postgresql` | ClickHouse PostgreSQL container port | `9005` | +| `containerPorts.interserver` | ClickHouse Interserver container port | `9009` | +| `containerPorts.metrics` | ClickHouse metrics container port | `8001` | +| `livenessProbe.enabled` | Enable livenessProbe on ClickHouse containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe on ClickHouse containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `startupProbe.enabled` | Enable startupProbe on ClickHouse containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | +| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `resources.limits` | The resources limits for the ClickHouse containers | `{}` | +| `resources.requests` | The requested resources for the ClickHouse containers | `{}` | +| `podSecurityContext.enabled` | Enabled ClickHouse pods' Security Context | `true` | +| `podSecurityContext.fsGroup` | Set ClickHouse pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enable containers' Security 
Context | `true` | +| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set containers' Security Context runAsNonRoot | `true` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set read only root file system pod's | `false` | +| `containerSecurityContext.privileged` | Set contraller container's Security Context privileged | `false` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set contraller container's Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.capabilities.drop` | List of capabilities to be droppedn | `["ALL"]` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `auth.username` | ClickHouse Admin username | `default` | +| `auth.password` | ClickHouse Admin password | `""` | +| `auth.existingSecret` | Name of a secret containing the Admin password | `""` | +| `auth.existingSecretKey` | Name of the key inside the existing secret | `""` | +| `logLevel` | Logging level | `information` | + +### ClickHouse keeper configuration parameters + +| Name | Description | Value | +| ------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ----------------------- | +| `keeper.enabled` | Deploy ClickHouse keeper. Support is experimental. | `false` | +| `defaultConfigurationOverrides` | Default configuration overrides (evaluated as a template) | `""` | +| `existingOverridesConfigmap` | The name of an existing ConfigMap with your custom configuration for ClickHouse | `""` | +| `extraOverrides` | Extra configuration overrides (evaluated as a template) apart from the default | `""` | +| `extraOverridesConfigmap` | The name of an existing ConfigMap with extra configuration for ClickHouse | `""` | +| `extraOverridesSecret` | The name of an existing ConfigMap with your custom configuration for ClickHouse | `""` | +| `usersExtraOverrides` | Users extra configuration overrides (evaluated as a template) apart from the default | `""` | +| `usersExtraOverridesConfigmap` | The name of an existing ConfigMap with users extra configuration for ClickHouse | `""` | +| `usersExtraOverridesSecret` | The name of an existing ConfigMap with your custom users configuration for ClickHouse | `""` | +| `initdbScripts` | Dictionary of initdb scripts | `{}` | +| `initdbScriptsSecret` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `""` | +| `startdbScripts` | Dictionary of startdb scripts | `{}` | +| `startdbScriptsSecret` | ConfigMap with the startdb scripts (Note: Overrides `startdbScripts`) | `""` | +| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `hostAliases` | ClickHouse pods host aliases | `[]` | +| `podLabels` | Extra labels for ClickHouse pods | `{}` | +| `podAnnotations` | Annotations for ClickHouse pods | `{}` | +| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` | +| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` | +| `affinity` | Affinity for ClickHouse pods assignment | `{}` | +| `nodeSelector` | Node labels for ClickHouse pods assignment | `{}` | +| `tolerations` | Tolerations for ClickHouse pods assignment | `[]` | +| `updateStrategy.type` | ClickHouse statefulset strategy type | `RollingUpdate` | +| `podManagementPolicy` | Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join | `Parallel` | +| `priorityClassName` | ClickHouse pods' priorityClassName | `""` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `schedulerName` | Name of the k8s scheduler (other than default) for ClickHouse pods | `""` | +| `terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | +| `lifecycleHooks` | for the ClickHouse container(s) to automate configuration before or after startup | `{}` | +| `extraEnvVars` | Array with extra environment variables to add to ClickHouse nodes | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ClickHouse nodes | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ClickHouse nodes | `""` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the ClickHouse pod(s) | `[]` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ClickHouse container(s) | `[]` | +| `sidecars` | Add additional sidecar containers to the ClickHouse pod(s) | `[]` | +| `initContainers` | Add additional init containers to the ClickHouse pod(s) | `[]` | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename | `""` | + +### Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | ClickHouse service type | `ClusterIP` | +| `service.ports.http` | ClickHouse service HTTP port | `8123` | +| `service.ports.https` | ClickHouse service HTTPS port | `443` | +| `service.ports.tcp` | ClickHouse service TCP port | `9000` | +| `service.ports.tcpSecure` | ClickHouse service TCP (secure) port | `9440` | +| `service.ports.keeper` | ClickHouse keeper TCP container port | `2181` | +| `service.ports.keeperSecure` | ClickHouse keeper TCP (secure) container port | `3181` | +| `service.ports.keeperInter` | ClickHouse keeper interserver TCP container port | `9444` | +| `service.ports.mysql` | ClickHouse service MySQL port | `9004` | +| `service.ports.postgresql` | ClickHouse service PostgreSQL port | `9005` | +| `service.ports.interserver` | ClickHouse service Interserver port | `9009` | +| `service.ports.metrics` | ClickHouse service metrics port | `8001` | +| `service.nodePorts.http` | Node port for HTTP 
| `""` | +| `service.nodePorts.https` | Node port for HTTPS | `""` | +| `service.nodePorts.tcp` | Node port for TCP | `""` | +| `service.nodePorts.tcpSecure` | Node port for TCP (with TLS) | `""` | +| `service.nodePorts.keeper` | ClickHouse keeper TCP container port | `""` | +| `service.nodePorts.keeperSecure` | ClickHouse keeper TCP (secure) container port | `""` | +| `service.nodePorts.keeperInter` | ClickHouse keeper interserver TCP container port | `""` | +| `service.nodePorts.mysql` | Node port for MySQL | `""` | +| `service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | +| `service.nodePorts.interserver` | Node port for Interserver | `""` | +| `service.nodePorts.metrics` | Node port for metrics | `""` | +| `service.clusterIP` | ClickHouse service Cluster IP | `""` | +| `service.loadBalancerIP` | ClickHouse service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | ClickHouse service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | ClickHouse service external traffic policy | `Cluster` | +| `service.annotations` | Additional custom annotations for ClickHouse service | `{}` | +| `service.extraPorts` | Extra ports to expose in ClickHouse service (normally used with the `sidecars` value) | `[]` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.headless.annotations` | Annotations for the headless service. | `{}` | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to ClickHouse | `false` | +| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | +| `externalAccess.service.ports.http` | ClickHouse service HTTP port | `80` | +| `externalAccess.service.ports.https` | ClickHouse service HTTPS port | `443` | +| `externalAccess.service.ports.tcp` | ClickHouse service TCP port | `9000` | +| `externalAccess.service.ports.tcpSecure` | ClickHouse service TCP (secure) port | `9440` | +| `externalAccess.service.ports.keeper` | ClickHouse keeper TCP container port | `2181` | +| `externalAccess.service.ports.keeperSecure` | ClickHouse keeper TCP (secure) container port | `3181` | +| `externalAccess.service.ports.keeperInter` | ClickHouse keeper interserver TCP container port | `9444` | +| `externalAccess.service.ports.mysql` | ClickHouse service MySQL port | `9004` | +| `externalAccess.service.ports.postgresql` | ClickHouse service PostgreSQL port | `9005` | +| `externalAccess.service.ports.interserver` | ClickHouse service Interserver port | `9009` | +| `externalAccess.service.ports.metrics` | ClickHouse service metrics port | `8001` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for each ClickHouse . Length must be the same as replicaCount | `[]` | +| `externalAccess.service.loadBalancerAnnotations` | Array of load balancer annotations for each ClickHouse . 
Length must be the same as shards multiplied by replicaCount | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.nodePorts.http` | Node port for HTTP | `[]` | +| `externalAccess.service.nodePorts.https` | Node port for HTTPS | `[]` | +| `externalAccess.service.nodePorts.tcp` | Node port for TCP | `[]` | +| `externalAccess.service.nodePorts.tcpSecure` | Node port for TCP (with TLS) | `[]` | +| `externalAccess.service.nodePorts.keeper` | ClickHouse keeper TCP container port | `[]` | +| `externalAccess.service.nodePorts.keeperSecure` | ClickHouse keeper TCP container port (with TLS) | `[]` | +| `externalAccess.service.nodePorts.keeperInter` | ClickHouse keeper interserver TCP container port | `[]` | +| `externalAccess.service.nodePorts.mysql` | Node port for MySQL | `[]` | +| `externalAccess.service.nodePorts.postgresql` | Node port for PostgreSQL | `[]` | +| `externalAccess.service.nodePorts.interserver` | Node port for Interserver | `[]` | +| `externalAccess.service.nodePorts.metrics` | Node port for metrics | `[]` | +| `externalAccess.service.labels` | Service labels for external access | `{}` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}` | +| `externalAccess.service.extraPorts` | Extra ports to expose in the ClickHouse external service | `[]` | +| `ingress.enabled` | Enable ingress record generation for ClickHouse | `false` | +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingress.hostname` | Default host for the ingress record | `clickhouse.local` | +| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `ingress.path` | Default path for the ingress record | `/` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
| `{}` | +| `ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | + +### Persistence Parameters + +| Name | Description | Value | +| --------------------------- | ----------------------------------------------------------------------- | ------------------- | +| `persistence.enabled` | Enable persistence using Persistent Volume Claims | `true` | +| `persistence.existingClaim` | Name of an existing PVC to use | `""` | +| `persistence.storageClass` | Storage class of backing PVC | `""` | +| `persistence.labels` | Persistent Volume Claim labels | `{}` | +| `persistence.annotations` | Persistent Volume Claim annotations | `{}` | +| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.selector` | Selector to match an existing Persistent Volume for ClickHouse data PVC | `{}` | +| `persistence.dataSource` | Custom PVC data source | `{}` | + +### Init Container Parameters + +| Name | Description | Value | +| ------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | -------------------------- | +| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | OS Shell + Utility image registry | `REGISTRY_NAME` | +| `volumePermissions.image.repository` | OS Shell + Utility image repository | `REPOSITORY_NAME/os-shell` | +| `volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` | +| `volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` | +| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` | +| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` | +| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` | + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------- | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | +| `serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` | +| `serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` | +| `metrics.enabled` | Enable the export of Prometheus metrics | `false` | +| `metrics.podAnnotations` | Annotations for metrics scraping | `{}` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.annotations` | Additional custom annotations for the ServiceMonitor | `{}` | +| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | +| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | +| `metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | + +### External Zookeeper paramaters + +| Name | Description | Value | +| --------------------------- | ----------------------------------------- | ------ | +| `externalZookeeper.servers` | List of external zookeeper servers to use | `[]` | +| `externalZookeeper.port` | Port of the Zookeeper servers | `2888` | + +### Zookeeper subchart parameters + +| Name | Description | Value | +| -------------------------------- | ----------------------------- | --------------------------- | +| `zookeeper.enabled` | Deploy Zookeeper subchart | `true` | +| `zookeeper.replicaCount` | Number of Zookeeper instances | `3` | +| `zookeeper.service.ports.client` | Zookeeper client port | `2181` | +| `zookeeper.image.registry` | Zookeeper image registry | `REGISTRY_NAME` | +| `zookeeper.image.repository` | Zookeeper image repository | `REPOSITORY_NAME/zookeeper` | +| `zookeeper.image.pullPolicy` | Zookeeper image pull policy | `IfNotPresent` | + +See to create the table. + +The above parameters map to the env variables defined in [bitnami/clickhouse](https://github.com/bitnami/containers/tree/main/bitnami/clickhouse). For more information please refer to the [bitnami/clickhouse](https://github.com/bitnami/containers/tree/main/bitnami/clickhouse) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, + +```console +helm install my-release \ + --set auth.username=admin \ + --set auth.password=password \ + oci://REGISTRY_NAME/REPOSITORY_NAME/clickhouse +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command sets the ClickHouse administrator account username and password to `admin` and `password` respectively. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/clickhouse +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. +> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/clickhouse/values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### ClickHouse keeper support + +You can set `keeper.enabled` to use ClickHouse keeper. If `keeper.enabled=true`, Zookeeper settings will be ignored. + +### External Zookeeper support + +You may want to have ClickHouse connect to an external Zookeeper rather than installing one inside your cluster. Typical reasons for this are to use a managed Zookeeper service, or to share a common Zookeeper ensemble for all your applications. To achieve this, the chart allows you to specify the connection details of the external Zookeeper with the [`externalZookeeper` parameter](#parameters). You should also disable the Zookeeper installation with the `zookeeper.enabled` option. Here is an example: + +```console +zookeeper.enabled=false +externalZookeeper.servers[0]=myexternalhost +externalZookeeper.port=2181 +``` + +### TLS secrets + +The chart also facilitates the creation of TLS secrets for use with the Ingress controller, with different options for certificate management. [Learn more about TLS secrets](https://docs.bitnami.com/kubernetes/infrastructure/clickhouse/administration/enable-tls-ingress/). + +## Persistence + +The [Bitnami ClickHouse](https://github.com/bitnami/containers/tree/main/bitnami/clickhouse) image stores the ClickHouse data and configurations at the `/bitnami` path of the container. Persistent Volume Claims are used to keep the data across deployments.
This is known to work in GCE, AWS, and minikube. + +### Additional environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. + +```yaml +clickhouse: + extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values. + +### Sidecars + +If additional containers are needed in the same pod as ClickHouse (such as additional metrics or logging exporters), they can be defined using the `sidecars` parameter. If these sidecars export extra ports, extra port definitions can be added using the `service.extraPorts` parameter. [Learn more about configuring and using sidecar containers](https://docs.bitnami.com/kubernetes/infrastructure/clickhouse/configuration/configure-sidecar-init-containers/). + +### Ingress without TLS + +For using ingress (example without TLS): + +```yaml +ingress: + ## If true, ClickHouse server Ingress will be created + ## + enabled: true + + ## ClickHouse server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## ClickHouse server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - clickhouse.domain.com +``` + +### Ingress TLS + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret (named `clickhouse-server-tls` in this example) in the namespace. Include the secret's name, along with the desired hostnames, in the Ingress TLS section of your custom `values.yaml` file: + +```yaml +ingress: + ## If true, ClickHouse server Ingress will be created + ## + enabled: true + + ## ClickHouse server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## ClickHouse server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - clickhouse.domain.com + + ## ClickHouse server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: clickhouse-server-tls + hosts: + - clickhouse.domain.com +``` + +### Using custom scripts + +For advanced operations, the Bitnami ClickHouse chart allows using custom init and start scripts that will be mounted in `/docker-entrypoint.initdb.d` and `/docker-entrypoint.startdb.d` . The `init` scripts will be run on the first boot whereas the `start` scripts will be run on every container start. For adding the scripts directly as values use the `initdbScripts` and `startdbScripts` values. For using Secrets use the `initdbScriptsSecret` and `startdbScriptsSecret`. + +```yaml +initdbScriptsSecret: init-scripts-secret +startdbScriptsSecret: start-scripts-secret +``` + +### Pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). 
+ +As an alternative, use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading + +### To 2.0.0 + +This major updates the Zookeeper subchart to it newest major, 11.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100). + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. \ No newline at end of file diff --git a/charts/clickhouse/templates/NOTES.txt b/charts/clickhouse/templates/NOTES.txt old mode 100755 new mode 100644 index f8a6dd14..4bb61dab --- a/charts/clickhouse/templates/NOTES.txt +++ b/charts/clickhouse/templates/NOTES.txt @@ -1,31 +1,58 @@ -** Please be patient while the chart is being deployed ** - -1. Get the Clickhouse URL by running: - -{{- if .Values.clickhouse.ingress.enabled }} - - export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }} -o jsonpath='{.spec.rules[0].host}') - echo "Clickhouse URL: http://$HOSTNAME/" - -{{- else }} - - echo URL : http://127.0.0.1:8080/ - echo Management URL : http://127.0.0.1:8080/manager - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 8123:{{ .Values.clickhouse.http_port }} - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9000:{{ .Values.clickhouse.tcp_port }} - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }} 9009:{{ .Values.clickhouse.interserver_http_port }} - -{{- end }} - -2. Get the Tabix URL by running: - -{{- if .Values.tabix.ingress.enabled }} - - export HOSTNAME=$(kubectl get ingress --namespace {{ .Release.Namespace }} {{ template "clickhouse.fullname" . }}-tabix -o jsonpath='{.spec.rules[0].host}') - echo "Tabix URL: http://$HOSTNAME/" - -{{- else }} - - kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "clickhouse.fullname" . }}-tabix 80 - -{{- end }} +CHART NAME: {{ .Chart.Name }} +CHART VERSION: {{ .Chart.Version }} +APP VERSION: {{ .Chart.AppVersion }} + +** Please be patient while the chart is being deployed ** + +{{- if .Values.diagnosticMode.enabled }} +The chart has been deployed in diagnostic mode. 
All probes have been disabled and the command has been overwritten with: + + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} + +Get the list of pods by executing: + + kubectl get pods --namespace {{ include "common.names.namespace" . | quote }} -l app.kubernetes.io/instance={{ .Release.Name }} + +Access the pod you want to debug by executing + + kubectl exec --namespace {{ include "common.names.namespace" . | quote }} -ti -- bash + +In order to replicate the container startup scripts execute this command: + + /opt/bitnami/scripts/clickhouse/entrypoint.sh /opt/bitnami/scripts/clickhouse/run.sh + +{{- else }} + +ClickHouse is available in the following address: + +{{- if .Values.externalAccess.enabled }} + +NOTE: It may take a few minutes for the LoadBalancer IP to be available. + + kubectl get svc --namespace {{ template "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.fullname" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=clickhouse" -w + +{{- else if (eq "LoadBalancer" .Values.service.type) }} + + export SERVICE_IP=$(kubectl get svc --namespace {{ template "common.names.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") + +{{- else if (eq "NodePort" .Values.service.type)}} + + export NODE_IP=$(kubectl get nodes --namespace {{ template "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ template "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }}) + +{{- else if (eq "ClusterIP" .Values.service.type)}} + + kubectl port-forward --namespace {{ template "common.names.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.tcp }}:9000 & + +{{- end }} + +Credentials: + + echo "Username : {{ .Values.auth.username }}" + echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "clickhouse.secretName" . }} -o jsonpath="{.data.{{ include "clickhouse.secretKey" .}}}" | base64 -d)" + +{{- end }} + +{{- include "common.warnings.rollingTag" .Values.image }} +{{- include "clickhouse.validateValues" . }} diff --git a/charts/clickhouse/templates/_helpers.tpl b/charts/clickhouse/templates/_helpers.tpl old mode 100755 new mode 100644 index e6690cdf..b5243526 --- a/charts/clickhouse/templates/_helpers.tpl +++ b/charts/clickhouse/templates/_helpers.tpl @@ -1,56 +1,219 @@ -{{/* vim: set filetype=mustache: */}} {{/* -Expand the name of the chart. +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 */}} -{{- define "clickhouse.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} + +{{/* +Return the proper ClickHouse image name +*/}} +{{- define "clickhouse.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} {{- end -}} {{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. 
+Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "clickhouse.volumePermissions.image" -}} +{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "clickhouse.imagePullSecrets" -}} +{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} +{{- end -}} + +{{/* +Return true if a TLS credentials secret object should be created */}} -{{- define "clickhouse.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- define "clickhouse.createTlsSecret" -}} +{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "clickhouse.tlsSecretName" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "%s-crt" (include "common.names.fullname" .) -}} {{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} + {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "clickhouse.tlsCert" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "/opt/bitnami/clickhouse/certs/tls.crt" -}} {{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} + {{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/clickhouse/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "clickhouse.tlsCertKey" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "/opt/bitnami/clickhouse/certs/tls.key" -}} +{{- else -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/clickhouse/certs/%s" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "clickhouse.tlsCACert" -}} +{{- if .Values.tls.autoGenerated }} + {{- printf "/opt/bitnami/clickhouse/certs/ca.crt" -}} +{{- else -}} + {{- printf "/opt/bitnami/clickhouse/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Get the ClickHouse configuration configmap. +*/}} +{{- define "clickhouse.configmapName" -}} +{{- if .Values.existingOverridesConfigmap -}} + {{- .Values.existingOverridesConfigmap -}} +{{- else }} + {{- printf "%s" (include "common.names.fullname" . ) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the ClickHouse configuration configmap. +*/}} +{{- define "clickhouse.extraConfigmapName" -}} +{{- if .Values.extraOverridesConfigmap -}} + {{- .Values.extraOverridesConfigmap -}} +{{- else }} + {{- printf "%s-extra" (include "common.names.fullname" . ) -}} +{{- end -}} {{- end -}} + + +{{/* +Get the ClickHouse configuration users configmap. +*/}} +{{- define "clickhouse.usersExtraConfigmapName" -}} +{{- if .Values.usersExtraOverridesConfigmap -}} + {{- .Values.usersExtraOverridesConfigmap -}} +{{- else }} + {{- printf "%s-users-extra" (include "common.names.fullname" . 
) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the Clickhouse password secret name +*/}} +{{- define "clickhouse.secretName" -}} +{{- if .Values.auth.existingSecret -}} + {{- .Values.auth.existingSecret -}} +{{- else }} + {{- printf "%s" (include "common.names.fullname" . ) -}} {{- end -}} {{- end -}} {{/* -Create chart name and version as used by the chart label. +Get the ClickHouse password key inside the secret */}} -{{- define "clickhouse.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- define "clickhouse.secretKey" -}} +{{- if .Values.auth.existingSecret -}} + {{- .Values.auth.existingSecretKey -}} +{{- else }} + {{- print "admin-password" -}} +{{- end -}} {{- end -}} {{/* -Create clickhouse path. -if .Values.clickhouse.path is empty, default value "/var/lib/clickhouse". +Get the startialization scripts Secret name. */}} -{{- define "clickhouse.fullpath" -}} -{{- if .Values.clickhouse.path -}} -{{- .Values.clickhouse.path | trunc 63 | trimSuffix "-" -}} +{{- define "clickhouse.startdbScriptsSecret" -}} +{{- if .Values.startdbScriptsSecret -}} + {{- printf "%s" (tpl .Values.startdbScriptsSecret $) -}} {{- else -}} -{{- printf "%s" "/var/lib/clickhouse" -}} + {{- printf "%s-start-scripts" (include "common.names.fullname" .) -}} {{- end -}} {{- end -}} {{/* -Create clickhouse log path. -if .Values.clickhouse.configmap.logger.path is empty, default value "/var/log/clickhouse-server". +Get the initialization scripts Secret name. */}} -{{- define "clickhouse.logpath" -}} -{{- if .Values.clickhouse.configmap.logger.path -}} -{{- .Values.clickhouse.configmap.logger.path | trunc 63 | trimSuffix "-" -}} +{{- define "clickhouse.initdbScriptsSecret" -}} +{{- if .Values.initdbScriptsSecret -}} + {{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} {{- else -}} -{{- printf "%s" "/var/log/clickhouse-server" -}} + {{- printf "%s-init-scripts" (include "common.names.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "clickhouse.headlessServiceName" -}} +{{- printf "%s-headless" (include "common.names.fullname" .) -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "clickhouse.zookeeper.fullname" -}} +{{- include "common.names.dependency.fullname" (dict "chartName" "zookeeper" "chartValues" .Values.zookeeper "context" $) -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "clickhouse.zookeeper.headlessServiceName" -}} +{{- printf "%s-headless" (include "clickhouse.zookeeper.fullname" .) -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "clickhouse.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message. +*/}} +{{- define "clickhouse.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "clickhouse.validateValues.zookeeper" .) 
-}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of ClickHouse - [Zoo]keeper */}} +{{- define "clickhouse.validateValues.zookeeper" -}} +{{- if or (and .Values.keeper.enabled .Values.zookeeper.enabled) (and .Values.keeper.enabled .Values.externalZookeeper.servers) (and .Values.zookeeper.enabled .Values.externalZookeeper.servers) -}} +clickhouse: Multiple [Zoo]keeper + You can only use one [zoo]keeper + Please choose use ClickHouse keeper or + installing a Zookeeper chart (--set zookeeper.enabled=true) or + using an external instance (--set zookeeper.servers ) +{{- end -}} +{{- if and (not .Values.keeper.enabled) (not .Values.zookeeper.enabled) (not .Values.externalZookeeper.servers) (ne (int .Values.shards) 1) (ne (int .Values.replicaCount) 1) -}} +clickhouse: No [Zoo]keeper + If you are deploying more than one ClickHouse instance, you need to enable [Zoo]keeper. Please choose installing a [Zoo]keeper (--set keeper.enabled=true) or (--set zookeeper.enabled=true) or + using an external instance (--set zookeeper.servers ) {{- end -}} {{- end -}} diff --git a/charts/clickhouse/templates/configmap-config.yaml b/charts/clickhouse/templates/configmap-config.yaml deleted file mode 100755 index 3bfae41e..00000000 --- a/charts/clickhouse/templates/configmap-config.yaml +++ /dev/null @@ -1,112 +0,0 @@ -{{- if .Values.clickhouse.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "clickhouse.fullname" . }}-config - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-config - app.kubernetes.io/instance: {{ .Release.Name }}-config - app.kubernetes.io/managed-by: {{ .Release.Service }} -data: - config.xml: |- - - - {{ include "clickhouse.fullpath" . }}/ - {{ printf "%s/tmp/" (include "clickhouse.fullpath" .) }} - {{ printf "%s/user_files/" (include "clickhouse.fullpath" .) }} - {{ printf "%s/format_schemas/" (include "clickhouse.fullpath" .) }} - - /etc/clickhouse-server/metrica.d/metrica.xml - - users.xml - - {{ template "clickhouse.fullname" . }} - 0.0.0.0 - {{ .Values.clickhouse.http_port | default "8123" }} - {{ .Values.clickhouse.tcp_port | default "9000" }} - {{ .Values.clickhouse.interserver_http_port | default "9009" }} - {{ .Values.clickhouse.configmap.max_connections | default "4096" }} - {{ .Values.clickhouse.configmap.keep_alive_timeout | default "3" }} - {{ .Values.clickhouse.configmap.max_concurrent_queries | default "100" }} - {{ .Values.clickhouse.configmap.uncompressed_cache_size | default "8589934592" }} - {{ .Values.clickhouse.configmap.mark_cache_size | default "5368709120" }} - {{ .Values.timezone | default "Asia/Shanghai" }} - {{ .Values.clickhouse.configmap.umask | default "027" }} - {{ .Values.clickhouse.configmap.mlock_executable | default "false" }} - - - - {{ .Values.clickhouse.configmap.builtin_dictionaries_reload_interval | default "3600" }} - {{ .Values.clickhouse.configmap.max_session_timeout | default "3600" }} - {{ .Values.clickhouse.configmap.default_session_timeout | default "60" }} - {{ .Values.clickhouse.configmap.disable_internal_dns_cache | default "1" }} - - - system - query_log
- toYYYYMM(event_date) - 7500 -
- - - system - query_thread_log
- toYYYYMM(event_date) - 7500 -
- - - /clickhouse/task_queue/ddl - - - {{- if .Values.clickhouse.configmap.logger }} - - {{ .Values.clickhouse.configmap.logger.level | default "trace" }} - {{ printf "%s/%s" (include "clickhouse.logpath" .) "clickhouse-server.log" }} - {{ printf "%s/%s" (include "clickhouse.logpath" .) "clickhouse-server.err.log" }} - {{ .Values.clickhouse.configmap.logger.size | default "1000M" }} - {{ .Values.clickhouse.configmap.logger.count | default "10" }} - - {{- end }} - - {{- if .Values.clickhouse.configmap.compression.enabled }} - - {{- range .Values.clickhouse.configmap.compression.cases }} - {{- with . }} - - {{ .min_part_size }} - {{ .min_part_size_ratio }} - {{ .method }} - - {{- end }} - {{- end }} - - {{- end }} - - {{- if .Values.clickhouse.configmap.graphite.enabled }} - {{- range .Values.clickhouse.configmap.graphite.config }} - {{- with . }} - - {{ template "clickhouse.fullname" $ }}-graphite.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} - {{ $.Values.clickhouse.graphite.service.port }} - {{ .timeout | default "0.1" }} - {{ .interval | default "60" }} - {{ .root_path | default "one_min" }} - {{ .metrics | default "true" }} - {{ .events | default "true" }} - {{ .events_cumulative | default "true" }} - {{ .asynchronous_metrics | default "true" }} - - {{- end }} - {{- end }} - {{- end }} - - {{- if .Values.clickhouse.configmap.max_open_files }} - {{ .Values.clickhouse.configmap.max_open_files }} - {{- end }} - - {{- if .Values.clickhouse.configmap.interserver_http_host }} - {{ .Values.clickhouse.configmap.interserver_http_host }} - {{- end }} -
-{{- end }} diff --git a/charts/clickhouse/templates/configmap-extra.yaml b/charts/clickhouse/templates/configmap-extra.yaml new file mode 100644 index 00000000..153cf4d5 --- /dev/null +++ b/charts/clickhouse/templates/configmap-extra.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.extraOverrides (not .Values.extraOverridesConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 01_extra_overrides.xml: | + {{- include "common.tplvalues.render" (dict "value" .Values.extraOverrides "context" $) | nindent 4 }} +{{- end }} diff --git a/charts/clickhouse/templates/configmap-metrika.yaml b/charts/clickhouse/templates/configmap-metrika.yaml deleted file mode 100755 index 2d14bc99..00000000 --- a/charts/clickhouse/templates/configmap-metrika.yaml +++ /dev/null @@ -1,77 +0,0 @@ -{{- if .Values.clickhouse.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "clickhouse.fullname" . }}-metrica - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-metrica - app.kubernetes.io/instance: {{ .Release.Name }}-metrica - app.kubernetes.io/managed-by: {{ .Release.Service }} -data: - metrica.xml: |- - - - {{- if .Values.clickhouse.configmap.zookeeper_servers.enabled }} - - {{- range .Values.clickhouse.configmap.zookeeper_servers.config }} - {{- with . }} - - {{ .host }} - {{ .port }} - - {{- end }} - {{- end }} - {{ .Values.clickhouse.configmap.zookeeper_servers.session_timeout_ms }} - {{ .Values.clickhouse.configmap.zookeeper_servers.operation_timeout_ms }} - {{ .Values.clickhouse.configmap.zookeeper_servers.root }} - {{ .Values.clickhouse.configmap.zookeeper_servers.identity }} - - {{- end }} - - {{- if .Values.clickhouse.configmap.remote_servers.enabled }} - - <{{ include "clickhouse.fullname" . }}> - {{- range untilStep 0 (int .Values.clickhouse.replicas) 1 }} - - - {{ $.Values.clickhouse.configmap.remote_servers.internal_replication | default "false" }} - {{ include "clickhouse.fullname" $ }}-{{ . }}.{{ include "clickhouse.fullname" $ }}-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} - {{ $.Values.clickhouse.tcp_port}} - {{- if $.Values.clickhouse.configmap.remote_servers.replica.user }} - {{ $.Values.clickhouse.configmap.remote_servers.replica.user }} - {{- end }} - {{- if $.Values.clickhouse.configmap.remote_servers.replica.password }} - {{ $.Values.clickhouse.configmap.remote_servers.replica.password }} - {{- end }} - {{- if $.Values.clickhouse.configmap.remote_servers.replica.secure }} - {{ $.Values.clickhouse.configmap.remote_servers.replica.secure }} - {{- end }} - {{- if $.Values.clickhouse.configmap.remote_servers.replica.compression }} - {{ $.Values.clickhouse.configmap.remote_servers.replica.compression }} - {{- end }} - - {{- if $.Values.clickhouse.configmap.remote_servers.replica.backup.enabled }} - - {{ include "clickhouse.fullname" $ }}-replica-{{ . 
}}.{{ include "clickhouse.fullname" $ }}-replica-headless.{{ $.Release.Namespace }}.svc.{{ $.Values.clusterDomain }} - {{ $.Values.clickhouse.tcp_port}} - {{- if $.Values.clickhouse.configmap.remote_servers.replica.user }} - {{ $.Values.clickhouse.configmap.remote_servers.replica.user }} - {{- end }} - {{- if $.Values.clickhouse.configmap.remote_servers.replica.password }} - {{ $.Values.clickhouse.configmap.remote_servers.replica.password }} - {{- end }} - {{- if $.Values.clickhouse.configmap.remote_servers.replica.secure }} - {{ $.Values.clickhouse.configmap.remote_servers.replica.secure }} - {{- end }} - {{- if $.Values.clickhouse.configmap.remote_servers.replica.compression }} - {{ $.Values.clickhouse.configmap.remote_servers.replica.compression }} - {{- end }} - - {{- end }} - - {{- end }} - - - {{- end }} - -{{- end }} diff --git a/charts/clickhouse/templates/configmap-users-extra.yaml b/charts/clickhouse/templates/configmap-users-extra.yaml new file mode 100644 index 00000000..056d2d02 --- /dev/null +++ b/charts/clickhouse/templates/configmap-users-extra.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.usersExtraOverrides (not .Values.usersExtraOverridesConfigmap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-users-extra" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 01_users_extra_overrides.xml: | + {{- include "common.tplvalues.render" (dict "value" .Values.usersExtraOverrides "context" $) | nindent 4 }} +{{- end }} diff --git a/charts/clickhouse/templates/configmap-users.yaml b/charts/clickhouse/templates/configmap-users.yaml deleted file mode 100755 index 99dbdc3c..00000000 --- a/charts/clickhouse/templates/configmap-users.yaml +++ /dev/null @@ -1,68 +0,0 @@ -{{- if .Values.clickhouse.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "clickhouse.fullname" . }}-users - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-users - app.kubernetes.io/instance: {{ .Release.Name }}-users - app.kubernetes.io/managed-by: {{ .Release.Service }} -data: - users.xml: |- - - - {{- if .Values.clickhouse.configmap.profiles.enabled }} - - {{- range .Values.clickhouse.configmap.profiles.profile }} - {{- with . }} - <{{ .name }}> - {{- range $k_1, $v_1 := .config }} - <{{ $k_1 }}>{{ $v_1 }} - {{- end }} - - {{- end }} - {{- end }} - - {{- end }} - - {{- if .Values.clickhouse.configmap.users.enabled }} - - {{- range $key, $value := .Values.clickhouse.configmap.users.user }} - {{- with . }} - <{{ .name }}> - {{- range $k_1, $v_1 := .config }} - {{- if (eq "networks" $k_1) }} - - {{- range $v_1 }} - {{- with .}} - {{ . }} - {{- end }} - {{- end }} - - {{- else }} - <{{ $k_1 }}>{{ $v_1 }} - {{- end }} - {{- end }} - - {{- end }} - {{- end }} - - {{- end }} - - {{- if .Values.clickhouse.configmap.quotas.enabled }} - - {{- range $key, $value := .Values.clickhouse.configmap.quotas.quota }} - {{- with . 
}} - <{{ .name }}> - {{- range $val := .config }} - {{- range $k_1, $v_1 := $val }} - <{{ $k_1 }}>{{ $v_1 }} - {{- end }} - {{- end }} - - {{- end }} - {{- end }} - - {{- end }} - -{{- end }} diff --git a/charts/clickhouse/templates/configmap.yaml b/charts/clickhouse/templates/configmap.yaml new file mode 100644 index 00000000..2462712b --- /dev/null +++ b/charts/clickhouse/templates/configmap.yaml @@ -0,0 +1,20 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.existingOverridesConfigmap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 00_default_overrides.xml: | + {{- include "common.tplvalues.render" (dict "value" .Values.defaultConfigurationOverrides "context" $) | nindent 4 }} +{{- end }} diff --git a/charts/clickhouse/templates/deployment-tabix.yaml b/charts/clickhouse/templates/deployment-tabix.yaml deleted file mode 100755 index e3c9e453..00000000 --- a/charts/clickhouse/templates/deployment-tabix.yaml +++ /dev/null @@ -1,85 +0,0 @@ -{{- if .Values.tabix.enabled }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "clickhouse.fullname" . }}-tabix - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix - app.kubernetes.io/instance: {{ .Release.Name }}-tabix - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - replicas: {{ .Values.tabix.replicas }} - selector: - matchLabels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix - app.kubernetes.io/instance: {{ .Release.Name }}-tabix -{{- if .Values.tabix.updateStrategy }} - strategy: - type: {{ .Values.tabix.updateStrategy.type }} - rollingUpdate: - maxSurge: {{ .Values.tabix.updateStrategy.maxSurge }} - maxUnavailable: {{ .Values.tabix.updateStrategy.maxUnavailable }} -{{- end }} - template: - metadata: - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix - app.kubernetes.io/instance: {{ .Release.Name }}-tabix - spec: - {{- if .Values.affinity }} - affinity: -{{ toYaml .Values.affinity | indent 8 }} - {{- end }} - {{- if .Values.tabix.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.tabix.imagePullSecrets }} - - name: {{ . | quote }} - {{- end }} - {{- end }} - containers: - - name: {{ include "clickhouse.name" . 
}}-tabix - image: {{ .Values.tabix.image }}:{{ .Values.tabix.imageVersion }} - imagePullPolicy: {{ .Values.tabix.imagePullPolicy }} - ports: - - name: http - containerPort: 80 - env: - {{- if .Values.tabix.security }} - - name: USER - value: {{ .Values.tabix.security.user }} - - name: PASSWORD - value: {{ .Values.tabix.security.password }} - {{- end }} - {{- if .Values.tabix.automaticConnection }} - - name: CH_NAME - value: {{ .Values.tabix.automaticConnection.chName }} - - name: CH_HOST - value: {{ .Values.tabix.automaticConnection.chHost }} - - name: CH_LOGIN - value: {{ .Values.tabix.automaticConnection.chLogin }} - - name: CH_PASSWORD - value: {{ .Values.tabix.automaticConnection.chPassword }} - - name: CH_PARAMS - value: {{ .Values.tabix.automaticConnection.chParams }} - {{- end }} - {{- if .Values.tabix.livenessProbe.enabled }} - livenessProbe: - tcpSocket: - port: 80 - initialDelaySeconds: {{ .Values.tabix.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.tabix.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.tabix.livenessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.tabix.livenessProbe.failureThreshold }} - successThreshold: {{ .Values.tabix.livenessProbe.successThreshold }} - {{- end }} - {{- if .Values.tabix.readinessProbe.enabled }} - readinessProbe: - tcpSocket: - port: 80 - initialDelaySeconds: {{ .Values.tabix.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.tabix.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.tabix.readinessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.tabix.readinessProbe.failureThreshold }} - successThreshold: {{ .Values.tabix.readinessProbe.successThreshold }} - {{- end }} -{{- end }} diff --git a/charts/clickhouse/templates/extra-list.yaml b/charts/clickhouse/templates/extra-list.yaml new file mode 100644 index 00000000..2d35a580 --- /dev/null +++ b/charts/clickhouse/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/charts/clickhouse/templates/ingress-clickhouse.yaml b/charts/clickhouse/templates/ingress-clickhouse.yaml deleted file mode 100755 index a4a672b7..00000000 --- a/charts/clickhouse/templates/ingress-clickhouse.yaml +++ /dev/null @@ -1,27 +0,0 @@ -{{- if .Values.clickhouse.ingress.enabled}} -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: {{ include "clickhouse.fullname" . }} - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - rules: - host: {{ .Values.clickhouse.ingress.host }} - http: - paths: - - path: {{ .Values.clickhouse.ingress.path }} - backend: - serviceName: {{ include "clickhouse.fullname" . }} - servicePort: http -{{- if .Values.clickhouse.ingress.tls.enabled }} - tls: - hosts: - {{- range .Values.clickhouse.ingress.tls.hosts }} - - {{ . 
| quote }} - {{- end }} - secretName: {{ .Values.clickhouse.ingress.tls.secretName }} -{{- end }} -{{- end }} diff --git a/charts/clickhouse/templates/ingress-tabix.yaml b/charts/clickhouse/templates/ingress-tabix.yaml deleted file mode 100755 index 3b85c886..00000000 --- a/charts/clickhouse/templates/ingress-tabix.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if .Values.tabix.enabled }} -{{- if .Values.tabix.ingress.enabled}} -apiVersion: extensions/v1beta1 -kind: Ingress -metadata: - name: {{ include "clickhouse.fullname" . }}-tabix - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix - app.kubernetes.io/instance: {{ .Release.Name }}-tabix - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - rules: - host: {{ .Values.tabix.ingress.host }} - http: - paths: - - path: {{ .Values.tabix.ingress.path }} - backend: - serviceName: {{ include "clickhouse.fullname" . }}-tabix - servicePort: http -{{- if .Values.tabix.ingress.tls.enabled }} - tls: - hosts: - {{- range .Values.tabix.ingress.tls.hosts }} - - {{ . | quote }} - {{- end }} - secretName: {{ .Values.tabix.ingress.tls.secretName }} -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/clickhouse/templates/ingress-tls-secrets.yaml b/charts/clickhouse/templates/ingress-tls-secrets.yaml new file mode 100644 index 00000000..6ef20e36 --- /dev/null +++ b/charts/clickhouse/templates/ingress-tls-secrets.yaml @@ -0,0 +1,44 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +{{- if .Values.ingress.secrets }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ $.Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.ingress.hostname }} +{{- $ca := genCA "clickhouse-ca" 365 }} +{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/charts/clickhouse/templates/ingress.yaml b/charts/clickhouse/templates/ingress.yaml new file mode 100644 index 00000000..7000eceb --- /dev/null +++ b/charts/clickhouse/templates/ingress.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if or .Values.ingress.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingress.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.ingress.hostname }} + - host: {{ .Values.ingress.hostname | quote }} + http: + paths: + {{- if .Values.ingress.extraPaths }} + {{- toYaml .Values.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.ingress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} + tls: + {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }} + - hosts: + - {{ .Values.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.ingress.hostname }} + {{- end }} + {{- if .Values.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/charts/clickhouse/templates/init-scripts-secret.yaml b/charts/clickhouse/templates/init-scripts-secret.yaml new file mode 100644 index 00000000..32367093 --- /dev/null +++ b/charts/clickhouse/templates/init-scripts-secret.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.initdbScripts (not .Values.initdbScriptsSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +stringData: +{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }} +{{- end }} diff --git a/charts/clickhouse/templates/prometheusrule.yaml b/charts/clickhouse/templates/prometheusrule.yaml new file mode 100644 index 00000000..dc2d05d3 --- /dev/null +++ b/charts/clickhouse/templates/prometheusrule.yaml @@ -0,0 +1,24 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.metrics.prometheusRule.additionalLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "common.names.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }} +{{- end }} diff --git a/charts/clickhouse/templates/scripts-configmap.yaml b/charts/clickhouse/templates/scripts-configmap.yaml new file mode 100644 index 00000000..86aa34dc --- /dev/null +++ b/charts/clickhouse/templates/scripts-configmap.yaml @@ -0,0 +1,34 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + setup.sh: |- + #!/bin/bash + + # Execute entrypoint as usual after obtaining KEEPER_SERVER_ID + # check KEEPER_SERVER_ID in persistent volume via myid + # if not present, set based on POD hostname + if [[ -f "/bitnami/clickhouse/keeper/data/myid" ]]; then + export KEEPER_SERVER_ID="$(cat /bitnami/clickhouse/keeper/data/myid)" + else + HOSTNAME="$(hostname -s)" + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + export KEEPER_SERVER_ID=${BASH_REMATCH[2]} + else + echo "Failed to get index from hostname $HOST" + exit 1 + fi + fi + exec /opt/bitnami/scripts/clickhouse/entrypoint.sh /opt/bitnami/scripts/clickhouse/run.sh -- --listen_host=0.0.0.0 diff --git a/charts/clickhouse/templates/service-account.yaml b/charts/clickhouse/templates/service-account.yaml new file mode 100644 index 00000000..649086da --- /dev/null +++ b/charts/clickhouse/templates/service-account.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "clickhouse.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/charts/clickhouse/templates/service-external-access.yaml b/charts/clickhouse/templates/service-external-access.yaml new file mode 100644 index 00000000..f50baa21 --- /dev/null +++ b/charts/clickhouse/templates/service-external-access.yaml @@ -0,0 +1,155 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if $.Values.externalAccess.enabled }} +{{- $shards := $.Values.shards | int }} +{{- $replicas := $.Values.replicaCount | int }} +{{- $totalNodes := mul $shards $replicas }} +{{- range $shard, $e := until $shards }} +{{- range $i, $_e := until $replicas }} +{{- $loadBalancerAnnotationPosOffset := mul $shard $replicas }} +{{- $loadBalancerAnnotationPosition := add $loadBalancerAnnotationPosOffset $i }} +{{- $targetPod := printf "%s-shard%d-%d" (include "common.names.fullname" $) $shard $i }} +apiVersion: v1 +kind: Service +metadata: + name: {{ printf "%s-external" $targetPod | trunc 63 | trimSuffix "-" }} + namespace: {{ $.Release.Namespace | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.service.labels $.Values.commonLabels ) "context" $ ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + pod: {{ $targetPod }} + {{- if or $.Values.externalAccess.service.annotations $.Values.commonAnnotations $.Values.externalAccess.service.loadBalancerAnnotations }} + annotations: + {{- if and (not (empty $.Values.externalAccess.service.loadBalancerAnnotations)) (eq (len $.Values.externalAccess.service.loadBalancerAnnotations) $totalNodes) }} + {{ include "common.tplvalues.render" ( dict "value" (index $.Values.externalAccess.service.loadBalancerAnnotations $loadBalancerAnnotationPosition) "context" $) | nindent 4 }} + {{- end }} + {{- if $.Values.externalAccess.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $.Values.externalAccess.service.type }} + {{- if eq $.Values.externalAccess.service.type "LoadBalancer" }} + {{- if and (not (empty $.Values.externalAccess.service.loadBalancerIPs)) (eq (len $.Values.externalAccess.service.loadBalancerIPs) $totalNodes) }} + loadBalancerIP: {{ index $.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: http + port: {{ $.Values.externalAccess.service.ports.http }} + targetPort: http + {{- if not (empty $.Values.externalAccess.service.nodePorts.http) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.http $i }} + {{- else }} + nodePort: null + {{- end }} + {{- if $.Values.tls.enabled }} + - name: https + port: {{ $.Values.externalAccess.service.ports.https }} + targetPort: https + {{- if not (empty $.Values.externalAccess.service.nodePorts.https) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.https $i }} + {{- else }} + nodePort: null + {{- end }} + {{- end }} + {{- if $.Values.metrics.enabled }} + - name: http-metrics + port: {{ $.Values.externalAccess.service.ports.metrics }} + targetPort: http-metrics + {{- if not (empty $.Values.externalAccess.service.nodePorts.metrics) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.metrics $i }} + {{- else }} + nodePort: null + {{- end }} + {{- end }} + - name: tcp + port: {{ $.Values.externalAccess.service.ports.tcp }} + targetPort: tcp 
+ {{- if not (empty $.Values.externalAccess.service.nodePorts.tcp) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.tcp $i }} + {{- else }} + nodePort: null + {{- end }} + {{- if $.Values.tls.enabled }} + - name: tcp-secure + port: {{ $.Values.externalAccess.service.ports.tcpSecure }} + targetPort: tcp-secure + {{- if not (empty $.Values.externalAccess.service.nodePorts.tcpSecure) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.tcpSecure $i }} + {{- else }} + nodePort: null + {{- end }} + {{- end }} + {{- if $.Values.keeper.enabled }} + - name: tcp-keeper + port: {{ $.Values.externalAccess.service.ports.keeper }} + targetPort: tcp-keeper + {{- if not (empty $.Values.externalAccess.service.nodePorts.keeper) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.keeper $i }} + {{- else }} + nodePort: null + {{- end }} + - name: tcp-keeperinter + port: {{ $.Values.externalAccess.service.ports.keeperInter }} + targetPort: tcp-keeperinter + {{- if not (empty $.Values.externalAccess.service.nodePorts.keeperInter) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.keeperInter $i }} + {{- else }} + nodePort: null + {{- end }} + {{- if $.Values.tls.enabled }} + - name: tcp-keepertls + port: {{ $.Values.externalAccess.service.ports.keeperSecure }} + targetPort: tcp-keepertls + {{- if not (empty $.Values.externalAccess.service.nodePorts.keeperSecure) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.keeperSecure $i }} + {{- else }} + nodePort: null + {{- end }} + {{- end }} + {{- end }} + - name: tcp-mysql + port: {{ $.Values.externalAccess.service.ports.mysql }} + targetPort: tcp-mysql + {{- if not (empty $.Values.externalAccess.service.nodePorts.mysql) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.mysql $i }} + {{- else }} + nodePort: null + {{- end }} + - name: tcp-postgresql + port: {{ $.Values.externalAccess.service.ports.postgresql }} + targetPort: tcp-postgresql + {{- if not (empty $.Values.externalAccess.service.nodePorts.postgresql) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.postgresql $i }} + {{- else }} + nodePort: null + {{- end }} + - name: tcp-intersrv + port: {{ $.Values.externalAccess.service.ports.interserver }} + targetPort: tcp-intersrv + {{- if not (empty $.Values.externalAccess.service.nodePorts.interserver) }} + nodePort: {{ index $.Values.externalAccess.service.nodePorts.interserver $i }} + {{- else }} + nodePort: null + {{- end }} + {{- if $.Values.externalAccess.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" $.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.podLabels $.Values.commonLabels ) "context" $ ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} +{{- end }} diff --git a/charts/clickhouse/templates/service-headless.yaml b/charts/clickhouse/templates/service-headless.yaml new file mode 100644 index 00000000..f989841b --- /dev/null +++ b/charts/clickhouse/templates/service-headless.yaml @@ -0,0 +1,69 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ include "clickhouse.headlessServiceName" . 
}} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if or .Values.service.headless.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + - name: http + targetPort: http + port: {{ .Values.service.ports.http }} + protocol: TCP + - name: tcp + targetPort: tcp + port: {{ .Values.service.ports.tcp }} + protocol: TCP + {{- if .Values.tls.enabled }} + - name: tcp-secure + targetPort: tcp-secure + port: {{ .Values.service.ports.tcpSecure }} + protocol: TCP + {{- end }} + {{- if .Values.keeper.enabled }} + - name: tcp-keeper + targetPort: tcp-keeper + port: {{ .Values.service.ports.keeper }} + protocol: TCP + - name: tcp-keeperinter + targetPort: tcp-keeperinter + port: {{ .Values.service.ports.keeperInter }} + protocol: TCP + {{- if .Values.tls.enabled }} + - name: tcp-keepertls + targetPort: tcp-keepertls + port: {{ .Values.service.ports.keeperSecure }} + protocol: TCP + {{- end }} + {{- end }} + - name: tcp-mysql + targetPort: tcp-mysql + port: {{ .Values.service.ports.mysql }} + protocol: TCP + - name: tcp-postgresql + targetPort: tcp-postgresql + port: {{ .Values.service.ports.postgresql }} + protocol: TCP + - name: http-intersrv + targetPort: http-intersrv + port: {{ .Values.service.ports.interserver }} + protocol: TCP + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse diff --git a/charts/clickhouse/templates/service.yaml b/charts/clickhouse/templates/service.yaml new file mode 100644 index 00000000..f54e2268 --- /dev/null +++ b/charts/clickhouse/templates/service.yaml @@ -0,0 +1,152 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +apiVersion: v1 +kind: Service +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if or .Values.service.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . 
) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} + clusterIP: {{ .Values.service.clusterIP }} + {{- end }} + {{- if .Values.service.sessionAffinity }} + sessionAffinity: {{ .Values.service.sessionAffinity }} + {{- end }} + {{- if .Values.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} + externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: http + targetPort: http + port: {{ .Values.service.ports.http }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.http)) }} + nodePort: {{ .Values.service.nodePorts.http }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.tls.enabled }} + - name: https + targetPort: https + port: {{ .Values.service.ports.https }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.https)) }} + nodePort: {{ .Values.service.nodePorts.https }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + - name: tcp + targetPort: tcp + port: {{ .Values.service.ports.tcp }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcp)) }} + nodePort: {{ .Values.service.nodePorts.tcp }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.tls.enabled }} + - name: tcp-secure + targetPort: tcp-secure + port: {{ .Values.service.ports.tcpSecure }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcpSecure)) }} + nodePort: {{ .Values.service.nodePorts.tcpSecure }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.keeper.enabled }} + - name: tcp-keeper + targetPort: tcp-keeper + port: {{ .Values.service.ports.keeper }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcp)) }} + nodePort: {{ .Values.service.nodePorts.keeper }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: tcp-keeperinter + targetPort: tcp-keeperinter + port: {{ .Values.service.ports.keeperInter }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcp)) }} + nodePort: {{ .Values.service.nodePorts.keeperInter }} + {{- else if eq 
.Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.tls.enabled }} + - name: tcp-keepertls + targetPort: tcp-keepertls + port: {{ .Values.service.ports.keeperSecure }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcpSecure)) }} + nodePort: {{ .Values.service.nodePorts.keeperSecure }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- end }} + - name: tcp-mysql + targetPort: tcp-mysql + port: {{ .Values.service.ports.mysql }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.mysql)) }} + nodePort: {{ .Values.service.nodePorts.mysql }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: tcp-postgresql + targetPort: tcp-postgresql + port: {{ .Values.service.ports.postgresql }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.postgresql)) }} + nodePort: {{ .Values.service.nodePorts.postgresql }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + - name: http-intersrv + targetPort: http-intersrv + port: {{ .Values.service.ports.interserver }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.interserver)) }} + nodePort: {{ .Values.service.nodePorts.interserver }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.metrics.enabled }} + - name: http-metrics + targetPort: http-metrics + port: {{ .Values.service.ports.metrics }} + protocol: TCP + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.metrics)) }} + nodePort: {{ .Values.service.nodePorts.metrics }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} + selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse diff --git a/charts/clickhouse/templates/servicemonitor.yaml b/charts/clickhouse/templates/servicemonitor.yaml new file mode 100644 index 00000000..2148b375 --- /dev/null +++ b/charts/clickhouse/templates/servicemonitor.yaml @@ -0,0 +1,47 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "common.names.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }} + {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . 
) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if or .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations }} + {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} + {{- end }} +spec: + jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }} + {{- if .Values.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/charts/clickhouse/templates/start-scripts-secret.yaml b/charts/clickhouse/templates/start-scripts-secret.yaml new file mode 100644 index 00000000..c579f2e4 --- /dev/null +++ b/charts/clickhouse/templates/start-scripts-secret.yaml @@ -0,0 +1,19 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.startdbScripts (not .Values.startdbScriptsSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-start-scripts" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +stringData: +{{- include "common.tplvalues.render" (dict "value" .Values.startdbScripts "context" .) | nindent 2 }} +{{- end }} diff --git a/charts/clickhouse/templates/statefulset-clickhouse-replica.yaml b/charts/clickhouse/templates/statefulset-clickhouse-replica.yaml deleted file mode 100755 index 9b1db994..00000000 --- a/charts/clickhouse/templates/statefulset-clickhouse-replica.yaml +++ /dev/null @@ -1,184 +0,0 @@ -{{- if .Values.clickhouse.configmap.remote_servers.replica.backup.enabled }} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "clickhouse.fullname" . }}-replica - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . 
}}-replica - app.kubernetes.io/instance: {{ .Release.Name }}-replica - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - replicas: {{ .Values.clickhouse.replicas }} - podManagementPolicy: {{ .Values.clickhouse.podManagementPolicy }} - updateStrategy: - type: {{ .Values.clickhouse.updateStrategy }} - {{- if (eq "Recreate" .Values.clickhouse.updateStrategy) }} - rollingUpdate: null - {{- else if .Values.clickhouse.rollingUpdatePartition }} - rollingUpdate: - partition: {{ .Values.clickhouse.rollingUpdatePartition }} - {{- end }} - serviceName: {{ include "clickhouse.fullname" . }}-replica-headless - selector: - matchLabels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica - app.kubernetes.io/instance: {{ .Release.Name }}-replica - template: - metadata: - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica - app.kubernetes.io/instance: {{ .Release.Name }}-replica - spec: - {{- if .Values.affinity }} - affinity: -{{ toYaml .Values.affinity | indent 8 }} - {{- end }} - {{- if .Values.clickhouse.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.clickhouse.imagePullSecrets }} - - name: {{ . | quote }} - {{- end }} - {{- end }} - initContainers: - - name: init - image: busybox:1.31.0 - imagePullPolicy: IfNotPresent - args: - - /bin/sh - - -c - - | - mkdir -p /etc/clickhouse-server/metrica.d - containers: - - name: {{ include "clickhouse.fullname" . }}-replica - image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion }} - imagePullPolicy: {{ .Values.clickhouse.imagePullPolicy }} - ports: - - name: http-port - containerPort: {{ .Values.clickhouse.http_port | default "8123" }} - - name: tcp-port - containerPort: {{ .Values.clickhouse.tcp_port | default "9000" }} - - name: inter-http-port - containerPort: {{ .Values.clickhouse.interserver_http_port | default "9009" }} - {{- if .Values.clickhouse.livenessProbe.enabled }} - livenessProbe: - tcpSocket: - port: {{ .Values.clickhouse.tcp_port }} - initialDelaySeconds: {{ .Values.clickhouse.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.clickhouse.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.clickhouse.livenessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.clickhouse.livenessProbe.failureThreshold }} - successThreshold: {{ .Values.clickhouse.livenessProbe.successThreshold }} - {{- end }} - {{- if .Values.clickhouse.readinessProbe.enabled }} - readinessProbe: - tcpSocket: - port: {{ .Values.clickhouse.tcp_port }} - initialDelaySeconds: {{ .Values.clickhouse.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.clickhouse.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.clickhouse.readinessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.clickhouse.readinessProbe.failureThreshold }} - successThreshold: {{ .Values.clickhouse.readinessProbe.successThreshold }} - {{- end }} - volumeMounts: - - name: {{ include "clickhouse.fullname" . }}-replica-data - mountPath: {{ include "clickhouse.fullpath" . }} - - name: {{ include "clickhouse.fullname" . }}-replica-logs - mountPath: {{ include "clickhouse.logpath" . }} - - name: {{ include "clickhouse.fullname" . }}-config - mountPath: /etc/clickhouse-server/config.d - - name: {{ include "clickhouse.fullname" . }}-metrica - mountPath: /etc/clickhouse-server/metrica.d - - name: {{ include "clickhouse.fullname" . 
}}-users - mountPath: /etc/clickhouse-server/users.d - securityContext: - privileged: true - runAsUser: 0 - {{- if .Values.clickhouse.imagePullSecrets }} - imagePullSecrets: - - name: {{ .Values.clickhouse.imagePullSecrets }} - {{- end }} - {{- if .Values.clickhouse.nodeSelector }} - nodeSelector: -{{ toYaml .Values.clickhouse.nodeSelector | indent 8 }} - {{- end }} - volumes: - - name: {{ include "clickhouse.fullname" . }}-replica-data - {{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }} - persistentVolumeClaim: - claimName: {{ include "clickhouse.fullname" . }}-replica-data - {{- else }} - emptyDir: {} - {{- end }} - - name: {{ include "clickhouse.fullname" . }}-replica-logs - {{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }} - persistentVolumeClaim: - claimName: {{ include "clickhouse.fullname" . }}-replica-logs - {{- else }} - emptyDir: {} - {{- end }} - {{- if .Values.clickhouse.configmap.enabled }} - - name: {{ include "clickhouse.fullname" . }}-config - configMap: - name: {{ include "clickhouse.fullname" . }}-config - items: - - key: config.xml - path: config.xml - - name: {{ include "clickhouse.fullname" . }}-metrica - configMap: - name: {{ include "clickhouse.fullname" . }}-metrica - items: - - key: metrica.xml - path: metrica.xml - - name: {{ include "clickhouse.fullname" . }}-users - configMap: - name: {{ include "clickhouse.fullname" . }}-users - items: - - key: users.xml - path: users.xml - {{- end }} -{{- if .Values.clickhouse.persistentVolumeClaim.enabled }} - volumeClaimTemplates: -{{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }} - - metadata: - name: {{ include "clickhouse.fullname" . }}-replica-data - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica-data - app.kubernetes.io/instance: {{ .Release.Name }}-replica-data - app.kubernetes.io/managed-by: {{ .Release.Service }} - spec: - accessModes: - {{- range .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes }} - - {{ . | quote }} - {{- end }} - {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName) }} - storageClassName: "" - {{- else }} - storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName }} - {{- end }} - resources: - requests: - storage: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storage }} -{{- end }} -{{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }} - - metadata: - name: {{ include "clickhouse.fullname" . }}-replica-logs - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica-logs - app.kubernetes.io/instance: {{ .Release.Name }}-replica-logs - app.kubernetes.io/managed-by: {{ .Release.Service }} - spec: - accessModes: - {{- range .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes }} - - {{ . 
| quote }} - {{- end }} - {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName) }} - storageClassName: "" - {{- else }} - storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName }} - {{- end }} - resources: - requests: - storage: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storage }} -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/clickhouse/templates/statefulset-clickhouse.yaml b/charts/clickhouse/templates/statefulset-clickhouse.yaml deleted file mode 100755 index ec871274..00000000 --- a/charts/clickhouse/templates/statefulset-clickhouse.yaml +++ /dev/null @@ -1,182 +0,0 @@ -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "clickhouse.fullname" . }} - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - replicas: {{ .Values.clickhouse.replicas }} - podManagementPolicy: {{ .Values.clickhouse.podManagementPolicy }} - updateStrategy: - type: {{ .Values.clickhouse.updateStrategy }} - {{- if (eq "Recreate" .Values.clickhouse.updateStrategy) }} - rollingUpdate: null - {{- else if .Values.clickhouse.rollingUpdatePartition }} - rollingUpdate: - partition: {{ .Values.clickhouse.rollingUpdatePartition }} - {{- end }} - serviceName: {{ include "clickhouse.fullname" . }}-headless - selector: - matchLabels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - template: - metadata: - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - spec: - {{- if .Values.affinity }} - affinity: -{{ toYaml .Values.affinity | indent 8 }} - {{- end }} - {{- if .Values.clickhouse.imagePullSecrets }} - imagePullSecrets: - {{- range .Values.clickhouse.imagePullSecrets }} - - name: {{ . | quote }} - {{- end }} - {{- end }} - initContainers: - - name: init - image: busybox:1.31.0 - imagePullPolicy: IfNotPresent - args: - - /bin/sh - - -c - - | - mkdir -p /etc/clickhouse-server/metrica.d - containers: - - name: {{ include "clickhouse.fullname" . 
}} - image: {{ .Values.clickhouse.image }}:{{ .Values.clickhouse.imageVersion }} - imagePullPolicy: {{ .Values.clickhouse.imagePullPolicy }} - ports: - - name: http-port - containerPort: {{ .Values.clickhouse.http_port | default "8123" }} - - name: tcp-port - containerPort: {{ .Values.clickhouse.tcp_port | default "9000" }} - - name: inter-http-port - containerPort: {{ .Values.clickhouse.interserver_http_port | default "9009" }} - {{- if .Values.clickhouse.livenessProbe.enabled }} - livenessProbe: - tcpSocket: - port: {{ .Values.clickhouse.tcp_port }} - initialDelaySeconds: {{ .Values.clickhouse.livenessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.clickhouse.livenessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.clickhouse.livenessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.clickhouse.livenessProbe.failureThreshold }} - successThreshold: {{ .Values.clickhouse.livenessProbe.successThreshold }} - {{- end }} - {{- if .Values.clickhouse.readinessProbe.enabled }} - readinessProbe: - tcpSocket: - port: {{ .Values.clickhouse.tcp_port }} - initialDelaySeconds: {{ .Values.clickhouse.readinessProbe.initialDelaySeconds }} - periodSeconds: {{ .Values.clickhouse.readinessProbe.periodSeconds }} - timeoutSeconds: {{ .Values.clickhouse.readinessProbe.timeoutSeconds }} - failureThreshold: {{ .Values.clickhouse.readinessProbe.failureThreshold }} - successThreshold: {{ .Values.clickhouse.readinessProbe.successThreshold }} - {{- end }} - volumeMounts: - - name: {{ include "clickhouse.fullname" . }}-data - mountPath: {{ include "clickhouse.fullpath" . }} - - name: {{ include "clickhouse.fullname" . }}-logs - mountPath: {{ include "clickhouse.logpath" . }} - - name: {{ include "clickhouse.fullname" . }}-config - mountPath: /etc/clickhouse-server/config.d - - name: {{ include "clickhouse.fullname" . }}-metrica - mountPath: /etc/clickhouse-server/metrica.d - - name: {{ include "clickhouse.fullname" . }}-users - mountPath: /etc/clickhouse-server/users.d - securityContext: - privileged: true - runAsUser: 0 - {{- if .Values.clickhouse.imagePullSecrets }} - imagePullSecrets: - - name: {{ .Values.clickhouse.imagePullSecrets }} - {{- end }} - {{- if .Values.clickhouse.nodeSelector }} - nodeSelector: -{{ toYaml .Values.clickhouse.nodeSelector | indent 8 }} - {{- end }} - volumes: - - name: {{ include "clickhouse.fullname" . }}-data - {{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }} - persistentVolumeClaim: - claimName: {{ include "clickhouse.fullname" . }}-data - {{- else }} - emptyDir: {} - {{- end }} - - name: {{ include "clickhouse.fullname" . }}-logs - {{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }} - persistentVolumeClaim: - claimName: {{ include "clickhouse.fullname" . }}-logs - {{- else }} - emptyDir: {} - {{- end }} - {{- if .Values.clickhouse.configmap.enabled }} - - name: {{ include "clickhouse.fullname" . }}-config - configMap: - name: {{ include "clickhouse.fullname" . }}-config - items: - - key: config.xml - path: config.xml - - name: {{ include "clickhouse.fullname" . }}-metrica - configMap: - name: {{ include "clickhouse.fullname" . }}-metrica - items: - - key: metrica.xml - path: metrica.xml - - name: {{ include "clickhouse.fullname" . }}-users - configMap: - name: {{ include "clickhouse.fullname" . 
}}-users - items: - - key: users.xml - path: users.xml - {{- end }} -{{- if .Values.clickhouse.persistentVolumeClaim.enabled }} - volumeClaimTemplates: -{{- if .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.enabled }} - - metadata: - name: {{ include "clickhouse.fullname" . }}-data - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-data - app.kubernetes.io/instance: {{ .Release.Name }}-data - app.kubernetes.io/managed-by: {{ .Release.Service }} - spec: - accessModes: - {{- range .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.accessModes }} - - {{ . | quote }} - {{- end }} - {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName) }} - storageClassName: "" - {{- else }} - storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storageClassName | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.clickhouse.persistentVolumeClaim.dataPersistentVolume.storage | quote }} -{{- end }} -{{- if .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.enabled }} - - metadata: - name: {{ include "clickhouse.fullname" . }}-logs - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-logs - app.kubernetes.io/instance: {{ .Release.Name }}-logs - app.kubernetes.io/managed-by: {{ .Release.Service }} - spec: - accessModes: - {{- range .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.accessModes }} - - {{ . | quote }} - {{- end }} - {{- if (eq "-" .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName) }} - storageClassName: "" - {{- else }} - storageClassName: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storageClassName | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.clickhouse.persistentVolumeClaim.logsPersistentVolume.storage | quote }} -{{- end }} -{{- end }} diff --git a/charts/clickhouse/templates/statefulset.yaml b/charts/clickhouse/templates/statefulset.yaml new file mode 100644 index 00000000..13d526ca --- /dev/null +++ b/charts/clickhouse/templates/statefulset.yaml @@ -0,0 +1,425 @@ +{{- /* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- $shards := .Values.shards | int }} +{{- range $i, $e := until $shards }} +apiVersion: {{ include "common.capabilities.statefulset.apiVersion" $ }} +kind: StatefulSet +metadata: + name: {{ printf "%s-shard%d" (include "common.names.fullname" $ ) $i }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/component: clickhouse + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: {{ $.Values.replicaCount }} + podManagementPolicy: {{ $.Values.podManagementPolicy | quote }} + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.podLabels $.Values.commonLabels ) "context" $ ) }} + selector: + matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} + app.kubernetes.io/component: clickhouse + serviceName: {{ printf "%s-headless" (include "common.names.fullname" $) }} + {{- if $.Values.updateStrategy }} + updateStrategy: {{- toYaml $.Values.updateStrategy | nindent 4 }} + {{- end }} + template: + metadata: + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") $ | sha256sum }} + checksum/config-extra: {{ include (print $.Template.BasePath "/configmap-extra.yaml") $ | sha256sum }} + checksum/config-users-extra: {{ include (print $.Template.BasePath "/configmap-users-extra.yaml") $ | sha256sum }} + {{- if $.Values.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" $.Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- if and $.Values.metrics.enabled $.Values.metrics.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" $.Values.metrics.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/component: clickhouse + shard: {{ $i | quote }} + spec: + serviceAccountName: {{ template "clickhouse.serviceAccountName" $ }} + {{- include "clickhouse.imagePullSecrets" $ | nindent 6 }} + {{- if $.Values.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" $.Values.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" $.Values.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.podAffinityPreset "component" "clickhouse" "customLabels" $podLabels "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.podAntiAffinityPreset "component" "clickhouse" "customLabels" $podLabels "extraPodAffinityTerms" (ternary (list (dict "extraMatchLabels" (dict "shard" $i) "topologyKey" "topology.kubernetes.io/zone")) (list) $.Values.distributeReplicasByZone) "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" $.Values.nodeAffinityPreset.type "key" $.Values.nodeAffinityPreset.key "values" $.Values.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if $.Values.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" $.Values.nodeSelector "context" $) | nindent 8 
}} + {{- end }} + {{- if $.Values.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.priorityClassName }} + priorityClassName: {{ $.Values.priorityClassName | quote }} + {{- end }} + {{- if $.Values.schedulerName }} + schedulerName: {{ $.Values.schedulerName | quote }} + {{- end }} + {{- if $.Values.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" $.Values.topologySpreadConstraints "context" $) | nindent 8 }} + {{- end }} + {{- if $.Values.podSecurityContext.enabled }} + securityContext: {{- omit $.Values.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if $.Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ $.Values.terminationGracePeriodSeconds }} + {{- end }} + initContainers: + {{- if and $.Values.tls.enabled (not $.Values.volumePermissions.enabled) }} + - name: copy-certs + image: {{ include "clickhouse.volumePermissions.image" $ }} + imagePullPolicy: {{ $.Values.volumePermissions.image.pullPolicy | quote }} + {{- if $.Values.resources }} + resources: {{- toYaml $.Values.resources | nindent 12 }} + {{- end }} + {{- if $.Values.containerSecurityContext.enabled }} + # We don't require a privileged container in this case + securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -ec + - | + cp -L /tmp/certs/* /opt/bitnami/clickhouse/certs/ + chmod 600 {{ include "clickhouse.tlsCertKey" $ }} + volumeMounts: + - name: raw-certificates + mountPath: /tmp/certs + - name: clickhouse-certificates + mountPath: /opt/bitnami/clickhouse/certs + {{- else if and $.Values.volumePermissions.enabled $.Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "clickhouse.volumePermissions.image" $ }} + imagePullPolicy: {{ $.Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/sh + - -ec + - | + mkdir -p /bitnami/clickhouse/data + chmod 700 /bitnami/clickhouse/data + {{- if $.Values.keeper.enabled }} + mkdir -p /bitnami/clickhouse/keeper + chmod 700 /bitnami/clickhouse/keeper + {{- end }} + chown {{ $.Values.containerSecurityContext.runAsUser }}:{{ $.Values.podSecurityContext.fsGroup }} /bitnami/clickhouse + find /bitnami/clickhouse -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ + xargs -r chown -R {{ $.Values.containerSecurityContext.runAsUser }}:{{ $.Values.podSecurityContext.fsGroup }} + {{- if $.Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/clickhouse/certs/ + {{- if eq ( toString ( $.Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/clickhouse/certs/ + {{- else }} + chown -R {{ $.Values.containerSecurityContext.runAsUser }}:{{ $.Values.podSecurityContext.fsGroup }} /opt/bitnami/clickhouse/certs/ + {{- end }} + chmod 600 {{ include "clickhouse.tlsCertKey" $ }} + {{- end }} + securityContext: {{- include "common.tplvalues.render" (dict "value" $.Values.volumePermissions.containerSecurityContext "context" $) | nindent 12 }} + {{- if $.Values.volumePermissions.resources }} + resources: {{- toYaml $.Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/clickhouse + {{- if $.Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: clickhouse-certificates + mountPath: 
/opt/bitnami/clickhouse/certs + {{- end }} + {{- end }} + {{- if $.Values.initContainers }} + {{- include "common.tplvalues.render" (dict "value" $.Values.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: clickhouse + image: {{ template "clickhouse.image" $ }} + imagePullPolicy: {{ $.Values.image.pullPolicy }} + {{- if $.Values.containerSecurityContext.enabled }} + securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if $.Values.command }} + command: {{- include "common.tplvalues.render" (dict "value" $.Values.command "context" $) | nindent 12 }} + {{- end }} + {{- if $.Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if $.Values.args }} + args: {{- include "common.tplvalues.render" (dict "value" $.Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or $.Values.image.debug $.Values.diagnosticMode.enabled) | quote }} + - name: CLICKHOUSE_HTTP_PORT + value: {{ $.Values.containerPorts.http | quote }} + - name: CLICKHOUSE_TCP_PORT + value: {{ $.Values.containerPorts.tcp | quote }} + - name: CLICKHOUSE_MYSQL_PORT + value: {{ $.Values.containerPorts.mysql | quote }} + - name: CLICKHOUSE_POSTGRESQL_PORT + value: {{ $.Values.containerPorts.postgresql | quote }} + - name: CLICKHOUSE_INTERSERVER_HTTP_PORT + value: {{ $.Values.containerPorts.interserver | quote }} + {{- if $.Values.tls.enabled }} + - name: CLICKHOUSE_TCP_SECURE_PORT + value: {{ $.Values.containerPorts.tcpSecure | quote }} + - name: CLICKHOUSE_HTTPS_PORT + value: {{ $.Values.containerPorts.https | quote }} + {{- end }} + {{- if $.Values.keeper.enabled }} + - name: CLICKHOUSE_KEEPER_PORT + value: {{ $.Values.containerPorts.keeper | quote }} + - name: CLICKHOUSE_KEEPER_INTER_PORT + value: {{ $.Values.containerPorts.keeperInter | quote }} + {{- if $.Values.tls.enabled }} + - name: CLICKHOUSE_KEEPER_SECURE_PORT + value: {{ $.Values.containerPorts.keeperSecure | quote }} + {{- end }} + {{- end }} + {{- if $.Values.metrics.enabled }} + - name: CLICKHOUSE_METRICS_PORT + value: {{ $.Values.containerPorts.metrics | quote }} + {{- end }} + - name: CLICKHOUSE_SHARD_ID + value: {{ printf "shard%d" $i | quote }} + - name: CLICKHOUSE_REPLICA_ID + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- if $.Values.tls.enabled }} + - name: CLICKHOUSE_TLS_CERT_FILE + value: {{ include "clickhouse.tlsCert" $ | quote}} + - name: CLICKHOUSE_TLS_KEY_FILE + value: {{ include "clickhouse.tlsCertKey" $ | quote }} + - name: CLICKHOUSE_TLS_CA_FILE + value: {{ include "clickhouse.tlsCACert" $ | quote }} + {{- end }} + {{- if $.Values.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" $.Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if $.Values.keeper.enabled }} + {{- $replicas := $.Values.replicaCount | int }} + {{- range $j, $r := until $replicas }} + - name: {{ printf "KEEPER_NODE_%d" $j }} + value: {{ printf "%s-shard%d-%d.%s.%s.svc.%s" (include "common.names.fullname" $ ) $i $j (include "clickhouse.headlessServiceName" $) (include "common.names.namespace" $) $.Values.clusterDomain }} + {{- end }} + {{- else if 
$.Values.zookeeper.enabled }} + {{- $replicas := $.Values.zookeeper.replicaCount | int }} + {{- range $j, $r := until $replicas }} + - name: {{ printf "KEEPER_NODE_%d" $j }} + value: {{ printf "%s-%d.%s.%s.svc.%s" (include "clickhouse.zookeeper.fullname" $ ) $j (include "clickhouse.zookeeper.headlessServiceName" $) (include "common.names.namespace" $) $.Values.clusterDomain }} + {{- end }} + {{- end }} + envFrom: + {{- if $.Values.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" $.Values.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if $.Values.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" $.Values.extraEnvVarsSecret "context" $) }} + {{- end }} + {{- if $.Values.resources }} + resources: {{- toYaml $.Values.resources | nindent 12 }} + {{- end }} + ports: + - name: http + containerPort: {{ $.Values.containerPorts.http }} + - name: tcp + containerPort: {{ $.Values.containerPorts.tcp }} + {{- if $.Values.tls.enabled }} + - name: https + containerPort: {{ $.Values.containerPorts.https }} + - name: tcp-secure + containerPort: {{ $.Values.containerPorts.tcpSecure }} + {{- end }} + {{- if $.Values.keeper.enabled }} + - name: tcp-keeper + containerPort: {{ $.Values.containerPorts.keeper }} + - name: tcp-keeperinter + containerPort: {{ $.Values.containerPorts.keeperInter }} + {{- if $.Values.tls.enabled }} + - name: tcp-keepertls + containerPort: {{ $.Values.containerPorts.keeperSecure }} + {{- end }} + {{- end }} + - name: tcp-postgresql + containerPort: {{ $.Values.containerPorts.postgresql }} + - name: tcp-mysql + containerPort: {{ $.Values.containerPorts.mysql }} + - name: http-intersrv + containerPort: {{ $.Values.containerPorts.interserver }} + {{- if $.Values.metrics.enabled }} + - name: http-metrics + containerPort: {{ $.Values.containerPorts.metrics }} + {{- end }} + {{- if not $.Values.diagnosticMode.enabled }} + {{- if $.Values.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" $.Values.customLivenessProbe "context" $) | nindent 12 }} + {{- else if $.Values.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit $.Values.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /ping + port: http + {{- end }} + {{- if $.Values.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" $.Values.customReadinessProbe "context" $) | nindent 12 }} + {{- else if $.Values.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit $.Values.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /ping + port: http + {{- end }} + {{- if $.Values.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" $.Values.customStartupProbe "context" $) | nindent 12 }} + {{- else if $.Values.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit $.Values.startupProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /ping + port: http + {{- end }} + {{- end }} + {{- if $.Values.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" $.Values.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + - name: data + mountPath: /bitnami/clickhouse + - name: config + mountPath: /bitnami/clickhouse/etc/conf.d/default + {{- if or 
$.Values.extraOverridesConfigmap $.Values.extraOverrides }} + - name: extra-config + mountPath: /bitnami/clickhouse/etc/conf.d/extra-configmap + {{- end }} + {{- if or $.Values.usersExtraOverridesConfigmap $.Values.usersExtraOverrides }} + - name: users-extra-config + mountPath: /bitnami/clickhouse/etc/users.d/users-extra-configmap + {{- end }} + {{- if $.Values.extraOverridesSecret }} + - name: extra-secret + mountPath: /bitnami/clickhouse/etc/conf.d/extra-secret + {{- end }} + {{- if $.Values.usersExtraOverridesSecret }} + - name: users-extra-secret + mountPath: /bitnami/clickhouse/etc/users.d/users-extra-secret + {{- end }} + {{- if $.Values.tls.enabled }} + - name: clickhouse-certificates + mountPath: /bitnami/clickhouse/certs + {{- end }} + {{- if or $.Values.initdbScriptsSecret $.Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d + {{- end }} + {{- if or $.Values.startdbScriptsSecret $.Values.startdbScripts }} + - name: custom-start-scripts + mountPath: /docker-entrypoint-startdb.d + {{- end }} + {{- if $.Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" $.Values.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if $.Values.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: scripts + configMap: + name: {{ printf "%s-scripts" (include "common.names.fullname" $) }} + defaultMode: 0755 + - name: config + configMap: + name: {{ template "clickhouse.configmapName" $ }} + {{- if or $.Values.initdbScriptsSecret $.Values.initdbScripts }} + - name: custom-init-scripts + secret: + secretName: {{ include "clickhouse.initdbScriptsSecret" $ }} + {{- end }} + {{- if or $.Values.startdbScriptsSecret $.Values.startdbScripts }} + - name: custom-start-scripts + secret: + secretName: {{ include "clickhouse.startdbScriptsSecret" $ }} + {{- end }} + {{- if or $.Values.extraOverridesConfigmap $.Values.extraOverrides }} + - name: extra-config + configMap: + name: {{ template "clickhouse.extraConfigmapName" $ }} + {{- end }} + {{- if or $.Values.usersExtraOverridesConfigmap $.Values.usersExtraOverrides }} + - name: users-extra-config + configMap: + name: {{ template "clickhouse.usersExtraConfigmapName" $ }} + {{- end }} + {{- if $.Values.extraOverridesSecret }} + - name: extra-secret + secret: + secretName: {{ $.Values.extraOverridesSecret }} + {{- end }} + {{- if $.Values.usersExtraOverridesSecret }} + - name: users-extra-secret + secret: + secretName: {{ $.Values.usersExtraOverridesSecret }} + {{- end }} + {{- if not $.Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- else if $.Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ tpl $.Values.persistence.existingClaim $ }} + {{- end }} + {{- if $.Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ include "clickhouse.tlsSecretName" $ }} + - name: clickhouse-certificates + emptyDir: {} + {{- end }} + {{- if $.Values.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" $.Values.extraVolumes "context" $) | nindent 8 }} + {{- end }} + {{- if and $.Values.persistence.enabled (not $.Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- if or $.Values.persistence.annotations $.Values.commonAnnotations }} + {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list $.Values.persistence.annotations $.Values.commonLabels ) 
"context" $ ) }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $ ) | nindent 10 }} + {{- end }} + {{- $claimLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.persistence.labels $.Values.commonLabels ) "context" $ ) }} + labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $claimLabels "context" $ ) | nindent 10 }} + app.kubernetes.io/component: clickhouse + spec: + accessModes: + {{- range $.Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ $.Values.persistence.size | quote }} + {{- if $.Values.persistence.selector }} + selector: {{- include "common.tplvalues.render" (dict "value" $.Values.persistence.selector "context" $) | nindent 10 }} + {{- end }} + {{- if $.Values.persistence.dataSource }} + dataSource: {{- include "common.tplvalues.render" (dict "value" $.Values.persistence.dataSource "context" $) | nindent 10 }} + {{- end }} + {{- include "common.storage.class" (dict "persistence" $.Values.persistence "global" $.Values.global) | nindent 8 }} + {{- end }} +--- +{{- end }} diff --git a/charts/clickhouse/templates/svc-clickhouse-headless.yaml b/charts/clickhouse/templates/svc-clickhouse-headless.yaml deleted file mode 100755 index 980c2773..00000000 --- a/charts/clickhouse/templates/svc-clickhouse-headless.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "clickhouse.fullname" . }}-headless - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-headless - app.kubernetes.io/instance: {{ .Release.Name }}-headless - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - clusterIP: "None" - ports: - - port: {{ .Values.clickhouse.tcp_port }} - targetPort: tcp-port - protocol: TCP - name: tcp-port - - port: {{ .Values.clickhouse.http_port }} - targetPort: http-port - protocol: TCP - name: http-port - - port: {{ .Values.clickhouse.interserver_http_port }} - targetPort: inter-http-port - protocol: TCP - name: inter-http-port - selector: - app.kubernetes.io/name: {{ include "clickhouse.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/clickhouse/templates/svc-clickhouse-replica-headless.yaml b/charts/clickhouse/templates/svc-clickhouse-replica-headless.yaml deleted file mode 100755 index b26448d9..00000000 --- a/charts/clickhouse/templates/svc-clickhouse-replica-headless.yaml +++ /dev/null @@ -1,26 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "clickhouse.fullname" . }}-replica-headless - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica-headless - app.kubernetes.io/instance: {{ .Release.Name }}-replica-headless - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - clusterIP: "None" - ports: - - port: {{ .Values.clickhouse.tcp_port }} - targetPort: tcp-port - protocol: TCP - name: tcp-port - - port: {{ .Values.clickhouse.http_port }} - targetPort: http-port - protocol: TCP - name: http-port - - port: {{ .Values.clickhouse.interserver_http_port }} - targetPort: inter-http-port - protocol: TCP - name: inter-http-port - selector: - app.kubernetes.io/name: {{ include "clickhouse.name" . 
}}-replica - app.kubernetes.io/instance: {{ .Release.Name }}-replica diff --git a/charts/clickhouse/templates/svc-clickhouse-replica.yaml b/charts/clickhouse/templates/svc-clickhouse-replica.yaml deleted file mode 100755 index b3fd7eef..00000000 --- a/charts/clickhouse/templates/svc-clickhouse-replica.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "clickhouse.fullname" . }}-replica - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica - app.kubernetes.io/instance: {{ .Release.Name }}-replica - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - ports: - - port: {{ .Values.clickhouse.tcp_port }} - targetPort: tcp-port - protocol: TCP - name: tcp-port - - port: {{ .Values.clickhouse.http_port }} - targetPort: http-port - protocol: TCP - name: http-port - - port: {{ .Values.clickhouse.interserver_http_port }} - targetPort: inter-http-port - protocol: TCP - name: inter-http-port - selector: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-replica - app.kubernetes.io/instance: {{ .Release.Name }}-replica diff --git a/charts/clickhouse/templates/svc-clickhouse.yaml b/charts/clickhouse/templates/svc-clickhouse.yaml deleted file mode 100755 index b73c81a5..00000000 --- a/charts/clickhouse/templates/svc-clickhouse.yaml +++ /dev/null @@ -1,25 +0,0 @@ -apiVersion: v1 -kind: Service -metadata: - name: {{ include "clickhouse.fullname" . }} - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - ports: - - port: {{ .Values.clickhouse.tcp_port }} - targetPort: tcp-port - protocol: TCP - name: tcp-port - - port: {{ .Values.clickhouse.http_port }} - targetPort: http-port - protocol: TCP - name: http-port - - port: {{ .Values.clickhouse.interserver_http_port }} - targetPort: inter-http-port - protocol: TCP - name: inter-http-port - selector: - app.kubernetes.io/name: {{ include "clickhouse.name" . }} - app.kubernetes.io/instance: {{ .Release.Name }} diff --git a/charts/clickhouse/templates/svc-tabix.yaml b/charts/clickhouse/templates/svc-tabix.yaml deleted file mode 100755 index 56df5caa..00000000 --- a/charts/clickhouse/templates/svc-tabix.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- if .Values.tabix.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "clickhouse.fullname" . }}-tabix - labels: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix - app.kubernetes.io/instance: {{ .Release.Name }}-tabix - app.kubernetes.io/managed-by: {{ .Release.Service }} -spec: - ports: - - port: 80 - targetPort: http - protocol: TCP - name: http - selector: - app.kubernetes.io/name: {{ include "clickhouse.name" . }}-tabix - app.kubernetes.io/instance: {{ .Release.Name }}-tabix -{{- end }} diff --git a/charts/clickhouse/templates/tls-secret.yaml b/charts/clickhouse/templates/tls-secret.yaml new file mode 100644 index 00000000..04b188e1 --- /dev/null +++ b/charts/clickhouse/templates/tls-secret.yaml @@ -0,0 +1,29 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "clickhouse.createTlsSecret" . ) }} +{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) }} +{{- $ca := genCA "clickhouse-ca" 365 }} +{{- $fullname := include "common.names.fullname" . 
}} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $primaryHeadlessServiceName := printf "%s-headless" (include "common.names.fullname" .)}} +{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }} +{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ .Release.Namespace | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} diff --git a/charts/clickhouse/values.yaml b/charts/clickhouse/values.yaml old mode 100755 new mode 100644 index 2989bd88..444f13fa --- a/charts/clickhouse/values.yaml +++ b/charts/clickhouse/values.yaml @@ -1,374 +1,1131 @@ -## Timezone -timezone: "Asia/Shanghai" - -## Cluster domain -clusterDomain: "cluster.local" - -## -## Clickhouse Node selectors and tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature -## -# nodeSelector: {"beta.kubernetes.io/arch": "amd64"} -# tolerations: [] -## Clickhouse pod/node affinity/anti-affinity -## -#affinity: -# nodeAffinity: -# requiredDuringSchedulingIgnoredDuringExecution: -# nodeSelectorTerms: -# - matchExpressions: -# - key: "application/clickhouse" -# operator: In -# values: -# - "true" - -clickhouse: - ## StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel - ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy - ## - podManagementPolicy: "Parallel" - - ## StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete - ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets - ## - updateStrategy: "RollingUpdate" - - ## Partition update strategy - ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions - ## - # rollingUpdatePartition: - - ## - ## The path to the directory containing data. - ## Default value: /var/lib/clickhouse - path: "/var/lib/clickhouse" - ## - ## The port for connecting to the server over HTTP - http_port: "8123" - ## - ## Port for communicating with clients over the TCP protocol. - tcp_port: "9000" - ## - ## Port for exchanging data between ClickHouse servers. 
- interserver_http_port: "9009" - ## - ## The instance number of Clickhouse - replicas: "3" - ## Clickhouse image configuration. - image: "clickhouse/clickhouse-server" - imageVersion: "23.6.2.18" - imagePullPolicy: "IfNotPresent" - #imagePullSecrets: - ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. - ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - livenessProbe: - enabled: true - initialDelaySeconds: "30" - periodSeconds: "30" - timeoutSeconds: "5" - failureThreshold: "3" - successThreshold: "1" - ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. - ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - readinessProbe: - enabled: true - initialDelaySeconds: "30" - periodSeconds: "30" - timeoutSeconds: "5" - failureThreshold: "3" - successThreshold: "1" - ## volumeClaimTemplates is a list of claims that pods are allowed to reference. - ## The StatefulSet controller is responsible for mapping network identities to claims in a way that maintains the identity of a pod. - ## Every claim in this list must have at least one matching (by name) volumeMount in one container in the template. - ## A claim in this list takes precedence over any volumes in the template, with the same name. - persistentVolumeClaim: - enabled: false - ## Clickhouse data volume - dataPersistentVolume: - enabled: false - accessModes: - - "ReadWriteOnce" - storageClassName: "-" - storage: "500Gi" - ## Clickhouse logs volume - logsPersistentVolume: - enabled: false - accessModes: - - "ReadWriteOnce" - storageClassName: "-" - storage: "50Gi" - ## - ## An API object that manages external access to the services in a cluster, typically HTTP. - ## Ingress can provide load balancing, SSL termination and name-based virtual hosting. - ingress: - enabled: false - # host: "clickhouse.domain.com" - # path: "/" - # tls: - # enabled: false - # hosts: - # - "clickhouse.domain.com" - # - "clickhouse.domain1.com" - # secretName: "clickhouse-secret" - ## - ## Clickhouse config.xml and metrica.xml - configmap: +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.storageClass Global StorageClass for Persistent Volume(s) +## +global: + imageRegistry: "" + ## E.g. 
+ ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + storageClass: "" + +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.name +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param namespaceOverride String to fully override common.names.namespace +## +namespaceOverride: "" +## @param commonLabels Labels to add to all deployed objects +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Kubernetes cluster domain name +## +clusterDomain: cluster.local +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] + +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity + +## @section ClickHouse Parameters +## + +## Bitnami ClickHouse image +## ref: https://hub.docker.com/r/bitnami/clickhouse/tags/ +## @param image.registry [default: REGISTRY_NAME] ClickHouse image registry +## @param image.repository [default: REPOSITORY_NAME/clickhouse] ClickHouse image repository +## @skip image.tag ClickHouse image tag (immutable tags are recommended) +## @param image.digest ClickHouse image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy ClickHouse image pull policy +## @param image.pullSecrets ClickHouse image pull secrets +## @param image.debug Enable ClickHouse image debug mode +## +image: + registry: docker.io + repository: bitnami/clickhouse + tag: 23.10.5-debian-11-r0 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false +## @param shards Number of ClickHouse shards to deploy +## +shards: 2 + +## @param replicaCount Number of ClickHouse replicas per shard to deploy +## if keeper enable, same as keeper count, keeper cluster by shards. 
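+## For reference: with the defaults below (shards: 2, replicaCount: 3) the chart renders one
+## StatefulSet per shard (<release fullname>-shard0 and <release fullname>-shard1) with three
+## pods each, i.e. six ClickHouse pods in total; when keeper.enabled is true, the pods of each
+## shard also form that shard's ClickHouse Keeper ensemble.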
+## +replicaCount: 3 + +## @param distributeReplicasByZone Schedules replicas of the same shard to different availability zones +## +distributeReplicasByZone: false +## @param containerPorts.http ClickHouse HTTP container port +## @param containerPorts.https ClickHouse HTTPS container port +## @param containerPorts.tcp ClickHouse TCP container port +## @param containerPorts.tcpSecure ClickHouse TCP (secure) container port +## @param containerPorts.keeper ClickHouse keeper TCP container port +## @param containerPorts.keeperSecure ClickHouse keeper TCP (secure) container port +## @param containerPorts.keeperInter ClickHouse keeper interserver TCP container port +## @param containerPorts.mysql ClickHouse MySQL container port +## @param containerPorts.postgresql ClickHouse PostgreSQL container port +## @param containerPorts.interserver ClickHouse Interserver container port +## @param containerPorts.metrics ClickHouse metrics container port +## +containerPorts: + http: 8123 + https: 8443 + tcp: 9000 + tcpSecure: 9440 + keeper: 2181 + keeperSecure: 3181 + keeperInter: 9444 + mysql: 9004 + postgresql: 9005 + interserver: 9009 + metrics: 8001 +## Configure extra options for ClickHouse containers' liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on ClickHouse containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## +livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 +## @param readinessProbe.enabled Enable readinessProbe on ClickHouse containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## +readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 +## @param startupProbe.enabled Enable startupProbe on ClickHouse containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + failureThreshold: 3 + initialDelaySeconds: 10 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## +customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## ClickHouse resource 
requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+## @param resources.limits The resources limits for the ClickHouse containers
+## @param resources.requests The requested resources for the ClickHouse containers
+##
+resources:
+  limits: {}
+  requests: {}
+## Configure Pods Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod
+## @param podSecurityContext.enabled Enabled ClickHouse pods' Security Context
+## @param podSecurityContext.fsGroup Set ClickHouse pod's Security Context fsGroup
+## If you are using Kubernetes 1.18, the following code needs to be commented out.
+##
+podSecurityContext:
+  enabled: true
+  fsGroup: 1001
+## Configure Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
+## @param containerSecurityContext.enabled Enable containers' Security Context
+## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser
+## @param containerSecurityContext.runAsNonRoot Set containers' Security Context runAsNonRoot
+## @param containerSecurityContext.readOnlyRootFilesystem Set containers' Security Context readOnlyRootFilesystem
+## @param containerSecurityContext.privileged Set container's Security Context privileged
+## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation
+## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped
+## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+  runAsNonRoot: true
+  privileged: false
+  allowPrivilegeEscalation: false
+  readOnlyRootFilesystem: false
+  capabilities:
+    drop: ["ALL"]
+  seccompProfile:
+    type: "RuntimeDefault"
+
+## Authentication
+## @param auth.username ClickHouse Admin username
+## @param auth.password ClickHouse Admin password
+## @param auth.existingSecret Name of a secret containing the Admin password
+## @param auth.existingSecretKey Name of the key inside the existing secret
+##
+auth:
+  username: ""
+  password: ""
+  existingSecret: ""
+  existingSecretKey: ""
+
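+## E.g. to read the admin password from an existing Secret instead of setting it in plain
+## text (the Secret name and key below are placeholders, not chart defaults):
+## auth:
+##   username: admin
+##   existingSecret: my-clickhouse-auth
+##   existingSecretKey: admin-password
+##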
+## @param logLevel Logging level
+##
+logLevel: information
+
+## @section ClickHouse keeper configuration parameters
+## @param keeper.enabled Deploy ClickHouse keeper. Support is experimental.
+##
+keeper:
+  enabled: false
+
+## @param defaultConfigurationOverrides [string] Default configuration overrides (evaluated as a template)
+##
+defaultConfigurationOverrides: |
+  <clickhouse>
+    <!-- Macros -->
+    <macros>
+      <shard from_env="CLICKHOUSE_SHARD_ID"></shard>
+      <replica from_env="CLICKHOUSE_REPLICA_ID"></replica>
+      <layer>{{ include "common.names.fullname" . }}</layer>
+    </macros>
+    <!-- Log Level -->
+    <logger>
+      <level>{{ .Values.logLevel }}</level>
+    </logger>
+    {{- if or (ne (int .Values.shards) 1) (ne (int .Values.replicaCount) 1)}}
+    <!-- Cluster configuration - Any update of the shards and replicas requires helm upgrade -->
+    <remote_servers>
+      <default>
+        {{- $shards := $.Values.shards | int }}
+        {{- range $shard, $e := until $shards }}
+        <shard>
+          {{- $replicas := $.Values.replicaCount | int }}
+          {{- range $i, $_e := until $replicas }}
+          <replica>
+            <host>{{ printf "%s-shard%d-%d.%s.%s.svc.%s" (include "common.names.fullname" $ ) $shard $i (include "clickhouse.headlessServiceName" $) (include "common.names.namespace" $) $.Values.clusterDomain }}</host>
+            <port>{{ $.Values.service.ports.tcp }}</port>
+          </replica>
+          {{- end }}
+        </shard>
+        {{- end }}
+      </default>
+    </remote_servers>
+    {{- end }}
+    {{- if .Values.keeper.enabled }}
+    <!-- Keeper configuration -->
+    <keeper_server>
+      {{/*ClickHouse keeper configuration using the helm chart */}}
+      <tcp_port>{{ $.Values.containerPorts.keeper }}</tcp_port>
+      {{- if .Values.tls.enabled }}
+      <tcp_port_secure>{{ $.Values.containerPorts.keeperSecure }}</tcp_port_secure>
+      {{- end }}
+      <server_id from_env="KEEPER_SERVER_ID"></server_id>
+      <log_storage_path>/bitnami/clickhouse/keeper/coordination/log</log_storage_path>
+      <snapshot_storage_path>/bitnami/clickhouse/keeper/coordination/snapshots</snapshot_storage_path>
+
+      <coordination_settings>
+        <operation_timeout_ms>10000</operation_timeout_ms>
+        <session_timeout_ms>30000</session_timeout_ms>
+        <raft_logs_level>trace</raft_logs_level>
+      </coordination_settings>
+
+      <raft_configuration>
+      {{- $nodes := .Values.replicaCount | int }}
+      {{- range $node, $e := until $nodes }}
+      <server>
+        <id>{{ $node | int }}</id>
+        <hostname from_env="{{ printf "KEEPER_NODE_%d" $node }}"></hostname>
+        <port>{{ $.Values.service.ports.keeperInter }}</port>
+      </server>
+      {{- end }}
+      </raft_configuration>
+    </keeper_server>
+    {{- end }}
+    {{- if or .Values.keeper.enabled .Values.zookeeper.enabled .Values.externalZookeeper.servers }}
+    <!-- Zookeeper configuration -->
+    <zookeeper>
+      {{- if or .Values.keeper.enabled }}
+      {{- $nodes := .Values.replicaCount | int }}
+      {{- range $node, $e := until $nodes }}
+      <node>
+        <host from_env="{{ printf "KEEPER_NODE_%d" $node }}"></host>
+        <port>{{ $.Values.service.ports.keeper }}</port>
+      </node>
+      {{- end }}
+      {{- else if .Values.zookeeper.enabled }}
+      {{/* Zookeeper configuration using the helm chart */}}
+      {{- $nodes := .Values.zookeeper.replicaCount | int }}
+      {{- range $node, $e := until $nodes }}
+      <node>
+        <host from_env="{{ printf "KEEPER_NODE_%d" $node }}"></host>
+        <port>{{ $.Values.zookeeper.service.ports.client }}</port>
+      </node>
+      {{- end }}
+      {{- else if .Values.externalZookeeper.servers }}
+      {{/* Zookeeper configuration using an external instance */}}
+      {{- range $node :=.Values.externalZookeeper.servers }}
+      <node>
+        <host>{{ $node }}</host>
+        <port>{{ $.Values.externalZookeeper.port }}</port>
+      </node>
+      {{- end }}
+      {{- end }}
+    </zookeeper>
+    {{- end }}
+    {{- if .Values.tls.enabled }}
+    <!-- TLS configuration -->
+    <tcp_port_secure from_env="CLICKHOUSE_TCP_SECURE_PORT"></tcp_port_secure>
+    <https_port from_env="CLICKHOUSE_HTTPS_PORT"></https_port>
+    <openSSL>
+      <server>
+        {{- $certFileName := default "tls.crt" .Values.tls.certFilename }}
+        {{- $keyFileName := default "tls.key" .Values.tls.certKeyFilename }}
+        <certificateFile>/bitnami/clickhouse/certs/{{$certFileName}}</certificateFile>
+        <privateKeyFile>/bitnami/clickhouse/certs/{{$keyFileName}}</privateKeyFile>
+        <verificationMode>none</verificationMode>
+        <loadDefaultCAFile>true</loadDefaultCAFile>
+        <disableProtocols>sslv2,sslv3</disableProtocols>
+        <preferServerCiphers>true</preferServerCiphers>
+        {{- if or .Values.tls.autoGenerated .Values.tls.certCAFilename }}
+        {{- $caFileName := default "ca.crt" .Values.tls.certCAFilename }}
+        <caConfig>/bitnami/clickhouse/certs/{{$caFileName}}</caConfig>
+        {{- else }}
+        <loadDefaultCAFile>true</loadDefaultCAFile>
+        {{- end }}
+      </server>
+      <client>
+        <loadDefaultCAFile>true</loadDefaultCAFile>
+        <cacheSessions>true</cacheSessions>
+        <disableProtocols>sslv2,sslv3</disableProtocols>
+        <preferServerCiphers>true</preferServerCiphers>
+        <verificationMode>none</verificationMode>
+        <invalidCertificateHandler>
+          <name>AcceptCertificateHandler</name>
+        </invalidCertificateHandler>
+      </client>
+    </openSSL>
+    {{- end }}
+    {{- if .Values.metrics.enabled }}
+    <!-- Prometheus metrics -->
+    <prometheus>
+      <endpoint>/metrics</endpoint>
+      <port from_env="CLICKHOUSE_METRICS_PORT"></port>
+      <metrics>true</metrics>
+      <events>true</events>
+      <asynchronous_metrics>true</asynchronous_metrics>
+    </prometheus>
+    {{- end }}
+  </clickhouse>
+
+## @param existingOverridesConfigmap The name of an existing ConfigMap with your custom configuration for ClickHouse
+##
+existingOverridesConfigmap: ""
+
+## @param extraOverrides Extra configuration overrides (evaluated as a template) apart from the default
+##
+extraOverrides: ""
+
+## @param extraOverridesConfigmap The name of an existing ConfigMap with extra configuration for ClickHouse
+##
+extraOverridesConfigmap: ""
+
+## @param extraOverridesSecret The name of an existing Secret with your custom configuration for ClickHouse
+##
+extraOverridesSecret: ""
+
+## @param usersExtraOverrides Users extra configuration overrides (evaluated as a template) apart from the default
+##
+usersExtraOverrides: ""
+
+## @param usersExtraOverridesConfigmap The name of an existing ConfigMap with users extra configuration for ClickHouse
+##
+usersExtraOverridesConfigmap: ""
+
+## @param 
usersExtraOverridesSecret The name of an existing ConfigMap with your custom users configuration for ClickHouse +## +usersExtraOverridesSecret: "" + +## @param initdbScripts Dictionary of initdb scripts +## Specify dictionary of scripts to be run at first boot +## Example: +## initdbScripts: +## my_init_script.sh: | +## #!/bin/bash +## echo "Do something." +## +initdbScripts: {} +## @param initdbScriptsSecret ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) +## +initdbScriptsSecret: "" + +## @param startdbScripts Dictionary of startdb scripts +## Specify dictionary of scripts to be run on every start +## Example: +## startdbScripts: +## my_start_script.sh: | +## #!/bin/bash +## echo "Do something." +## +startdbScripts: {} +## @param startdbScriptsSecret ConfigMap with the startdb scripts (Note: Overrides `startdbScripts`) +## +startdbScriptsSecret: "" + +## @param command Override default container command (useful when using custom images) +## +command: + - /scripts/setup.sh +## @param args Override default container args (useful when using custom images) +## +args: [] +## @param hostAliases ClickHouse pods host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param podLabels Extra labels for ClickHouse pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Annotations for ClickHouse pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} +## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for ClickHouse pods assignment +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## NOTE: `podAffinityPreset`, `podAntiAffinityPreset`, and `nodeAffinityPreset` will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for ClickHouse pods assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} +## @param tolerations Tolerations for ClickHouse pods assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param updateStrategy.type ClickHouse statefulset strategy type +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + ## StrategyType + ## Can be set to RollingUpdate or OnDelete + ## + type: RollingUpdate + +## @param podManagementPolicy Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join +## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies +## +podManagementPolicy: Parallel + +## @param priorityClassName ClickHouse pods' priorityClassName +## +priorityClassName: "" +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## @param schedulerName Name of the k8s scheduler (other than default) for ClickHouse pods +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param terminationGracePeriodSeconds Seconds Redmine pod needs to terminate gracefully +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +terminationGracePeriodSeconds: "" +## @param lifecycleHooks for the ClickHouse container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## @param extraEnvVars Array with extra environment variables to add to ClickHouse nodes +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ClickHouse nodes +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ClickHouse nodes +## +extraEnvVarsSecret: "" +## @param extraVolumes Optionally specify extra list of additional volumes for the ClickHouse pod(s) +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ClickHouse container(s) +## +extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the ClickHouse pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the ClickHouse pod(s) +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +## e.g: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## command: ['sh', '-c', 'echo "hello world"'] +## +initContainers: [] + +## TLS configuration +## +tls: + ## @param tls.enabled Enable TLS traffic support + 
## + enabled: false + ## @param tls.autoGenerated Generate automatically self-signed TLS certificates + ## + autoGenerated: false + ## @param tls.certificatesSecret Name of an existing secret that contains the certificates + ## + certificatesSecret: "" + ## @param tls.certFilename Certificate filename + ## + certFilename: "" + ## @param tls.certKeyFilename Certificate key filename + ## + certKeyFilename: "" + ## @param tls.certCAFilename CA Certificate filename + ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html + ## + certCAFilename: "" + +## @section Traffic Exposure Parameters +## + +## ClickHouse service parameters +## +service: + ## @param service.type ClickHouse service type + ## + type: ClusterIP + ## @param service.ports.http ClickHouse service HTTP port + ## @param service.ports.https ClickHouse service HTTPS port + ## @param service.ports.tcp ClickHouse service TCP port + ## @param service.ports.tcpSecure ClickHouse service TCP (secure) port + ## @param service.ports.keeper ClickHouse keeper TCP container port + ## @param service.ports.keeperSecure ClickHouse keeper TCP (secure) container port + ## @param service.ports.keeperInter ClickHouse keeper interserver TCP container port + ## @param service.ports.mysql ClickHouse service MySQL port + ## @param service.ports.postgresql ClickHouse service PostgreSQL port + ## @param service.ports.interserver ClickHouse service Interserver port + ## @param service.ports.metrics ClickHouse service metrics port + ## + ports: + http: 8123 + https: 443 + tcp: 9000 + tcpSecure: 9440 + keeper: 2181 + keeperSecure: 3181 + keeperInter: 9444 + mysql: 9004 + postgresql: 9005 + interserver: 9009 + metrics: 8001 + ## Node ports to expose + ## @param service.nodePorts.http Node port for HTTP + ## @param service.nodePorts.https Node port for HTTPS + ## @param service.nodePorts.tcp Node port for TCP + ## @param service.nodePorts.tcpSecure Node port for TCP (with TLS) + ## @param service.nodePorts.keeper ClickHouse keeper TCP container port + ## @param service.nodePorts.keeperSecure ClickHouse keeper TCP (secure) container port + ## @param service.nodePorts.keeperInter ClickHouse keeper interserver TCP container port + ## @param service.nodePorts.mysql Node port for MySQL + ## @param service.nodePorts.postgresql Node port for PostgreSQL + ## @param service.nodePorts.interserver Node port for Interserver + ## @param service.nodePorts.metrics Node port for metrics + ## NOTE: choose port between <30000-32767> + ## + nodePorts: + http: "" + https: "" + tcp: "" + tcpSecure: "" + keeper: "" + keeperSecure: "" + keeperInter: "" + mysql: "" + postgresql: "" + interserver: "" + metrics: "" + ## @param service.clusterIP ClickHouse service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP ClickHouse service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges ClickHouse service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy ClickHouse service external traffic policy + ## ref 
http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for ClickHouse service + ## + annotations: {} + ## @param service.extraPorts Extra ports to expose in ClickHouse service (normally used with the `sidecars` value) + ## + extraPorts: [] + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Headless service properties + ## + headless: + ## @param service.headless.annotations Annotations for the headless service. ## - ## If Configmap's enabled is `true`, Custom config.xml and metrica.xml - enabled: true + annotations: {} + +## External Access to ClickHouse configuration +## +externalAccess: + ## @param externalAccess.enabled Enable Kubernetes external cluster access to ClickHouse + ## + enabled: false + ## Parameters to configure K8s service(s) used to externally access ClickHouse + ## Note: A new service per will be created + ## + service: + ## @param externalAccess.service.type Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP ## - ## The maximum number of inbound connections. - max_connections: "4096" + type: LoadBalancer + ## @param externalAccess.service.ports.http ClickHouse service HTTP port + ## @param externalAccess.service.ports.https ClickHouse service HTTPS port + ## @param externalAccess.service.ports.tcp ClickHouse service TCP port + ## @param externalAccess.service.ports.tcpSecure ClickHouse service TCP (secure) port + ## @param externalAccess.service.ports.keeper ClickHouse keeper TCP container port + ## @param externalAccess.service.ports.keeperSecure ClickHouse keeper TCP (secure) container port + ## @param externalAccess.service.ports.keeperInter ClickHouse keeper interserver TCP container port + ## @param externalAccess.service.ports.mysql ClickHouse service MySQL port + ## @param externalAccess.service.ports.postgresql ClickHouse service PostgreSQL port + ## @param externalAccess.service.ports.interserver ClickHouse service Interserver port + ## @param externalAccess.service.ports.metrics ClickHouse service metrics port ## - ## The number of seconds that ClickHouse waits for incoming requests before closing the connection. - keep_alive_timeout: "3" + ports: + http: 80 + https: 443 + tcp: 9000 + tcpSecure: 9440 + keeper: 2181 + keeperSecure: 3181 + keeperInter: 9444 + mysql: 9004 + postgresql: 9005 + interserver: 9009 + metrics: 8001 + ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for each ClickHouse . Length must be the same as replicaCount + ## e.g: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y ## - ## The maximum number of simultaneously processed requests. - max_concurrent_queries: "100" + loadBalancerIPs: [] + ## @param externalAccess.service.loadBalancerAnnotations Array of load balancer annotations for each ClickHouse . Length must be the same as shards multiplied by replicaCount + ## e.g: + ## loadBalancerAnnotations: + ## - external-dns.alpha.kubernetes.io/hostname: 1.external.example.com. + ## - external-dns.alpha.kubernetes.io/hostname: 2.external.example.com. 
## - ## Cache size (in bytes) for uncompressed data used by table engines from the MergeTree. - ## There is one shared cache for the server. Memory is allocated on demand. The cache is used if the option use_uncompressed_cache is enabled. - ## The uncompressed cache is advantageous for very short queries in individual cases. - uncompressed_cache_size: "8589934592" + loadBalancerAnnotations: [] + ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 ## - ## Approximate size (in bytes) of the cache of "marks" used by MergeTree. - ## The cache is shared for the server and memory is allocated as needed. The cache size must be at least 5368709120. - mark_cache_size: "5368709120" + loadBalancerSourceRanges: [] + ## @param externalAccess.service.nodePorts.http Node port for HTTP + ## @param externalAccess.service.nodePorts.https Node port for HTTPS + ## @param externalAccess.service.nodePorts.tcp Node port for TCP + ## @param externalAccess.service.nodePorts.tcpSecure Node port for TCP (with TLS) + ## @param externalAccess.service.nodePorts.keeper ClickHouse keeper TCP container port + ## @param externalAccess.service.nodePorts.keeperSecure ClickHouse keeper TCP container port (with TLS) + ## @param externalAccess.service.nodePorts.keeperInter ClickHouse keeper interserver TCP container port + ## @param externalAccess.service.nodePorts.mysql Node port for MySQL + ## @param externalAccess.service.nodePorts.postgresql Node port for PostgreSQL + ## @param externalAccess.service.nodePorts.interserver Node port for Interserver + ## @param externalAccess.service.nodePorts.metrics Node port for metrics + ## NOTE: choose port between <30000-32767> + ## e.g: + ## nodePorts: + ## tls: + ## - 30001 + ## - 30002 ## - ## You can specify umask here (see "man umask"). Server will apply it on startup. - ## Number is always parsed as octal. Default umask is 027 (other users cannot read logs, data files, etc; group can only read). - umask: "022" + nodePorts: + http: [] + https: [] + tcp: [] + tcpSecure: [] + keeper: [] + keeperSecure: [] + keeperInter: [] + mysql: [] + postgresql: [] + interserver: [] + metrics: [] + ## @param externalAccess.service.labels Service labels for external access ## - ## Perform mlockall after startup to lower first queries latency and to prevent clickhouse executable from being paged out under high IO load. - ## Enabling this option is recommended but will lead to increased startup time for up to a few seconds. - mlock_executable: false + labels: {} + ## @param externalAccess.service.annotations Service annotations for external access ## - ## The interval in seconds before reloading built-in dictionaries. - ## ClickHouse reloads built-in dictionaries every x seconds. This makes it possible to edit dictionaries "on the fly" without restarting the server. - builtin_dictionaries_reload_interval: "3600" + annotations: {} + ## @param externalAccess.service.extraPorts Extra ports to expose in the ClickHouse external service ## - ## Maximum session timeout, in seconds. 
- max_session_timeout: "3600" + extraPorts: [] + +## ClickHouse ingress parameters +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## @param ingress.enabled Enable ingress record generation for ClickHouse + ## + enabled: false + ## @param ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param ingress.hostname Default host for the ingress record + ## + hostname: clickhouse.local + ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. + ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: clickhouse.local + ## path: / + ## + extraHosts: [] + ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - clickhouse.local + ## secretName: clickhouse.local-tls + ## + extraTls: [] + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see 
README.md for more information + ## e.g: + ## secrets: + ## - name: clickhouse.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + +## @section Persistence Parameters +## + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable persistence using Persistent Volume Claims + ## + enabled: true + ## @param persistence.existingClaim Name of an existing PVC to use + ## + existingClaim: "" + ## @param persistence.storageClass Storage class of backing PVC + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.labels Persistent Volume Claim labels + ## + labels: {} + ## @param persistence.annotations Persistent Volume Claim annotations + ## + annotations: {} + ## @param persistence.accessModes Persistent Volume Access Modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size Size of data volume + ## + size: 15Gi + ## @param persistence.selector Selector to match an existing Persistent Volume for ClickHouse data PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## @param persistence.dataSource Custom PVC data source + ## + dataSource: {} +## @section Init Container Parameters +## + +## 'volumePermissions' init container parameters +## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values +## based on the *podSecurityContext/*containerSecurityContext parameters +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` + ## + enabled: false + ## OS Shell + Utility image + ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/ + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository + ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended) + ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy + ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 11-debian-11-r91 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName ## - ## Default session timeout, in seconds. 
- default_session_timeout: "60" + pullSecrets: [] + ## Init container's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## @param volumePermissions.resources.limits The resources limits for the init container + ## @param volumePermissions.resources.requests The requested resources for the init container + ## + resources: + limits: {} + requests: {} + ## Init container Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser + ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the + ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed) + ## + containerSecurityContext: + runAsUser: 0 + +## @section Other Parameters +## + +## ServiceAccount configuration +## +serviceAccount: + ## @param serviceAccount.create Specifies whether a ServiceAccount should be created + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.annotations Additional Service Account annotations (evaluated as a template) + ## + annotations: {} + ## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account + ## + automountServiceAccountToken: true + +## Prometheus metrics +## +metrics: + ## @param metrics.enabled Enable the export of Prometheus metrics + ## + enabled: false + ## @param metrics.podAnnotations [object] Annotations for metrics scraping + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.containerPorts.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) ## - ## Uncomment to disable ClickHouse internal DNS caching. - disable_internal_dns_cache: "1" + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running ## - ## The maximum number of open files. - ## We recommend using this option in Mac OS X, since the getrlimit() function returns an incorrect value. - #max_open_files: + namespace: "" + ## @param metrics.serviceMonitor.annotations Additional custom annotations for the ServiceMonitor ## - ## The host name that can be used by other servers to access this server. - ## If omitted, it is defined in the same way as the hostname-f command. - ## Useful for breaking away from a specific network interface. - #interserver_http_host: + annotations: {} + ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor ## - ## Logging settings. - # path – The log path. Default value: /var/log/clickhouse-server. - # level – Logging level. Acceptable values: trace, debug, information, warning, error. Default value: /var/log/clickhouse-server - # size – Size of the file. Applies to loganderrorlog. Once the file reaches size, ClickHouse archives and renames it, and creates a new log file in its place. - # count – The number of archived log files that ClickHouse stores. 
- logger: - path: "/var/log/clickhouse-server" - level: "trace" - size: "1000M" - count: "10" + labels: {} + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus ## - ## Data compression settings. - # min_part_size – The minimum size of a table part. - # min_part_size_ratio – The ratio of the minimum size of a table part to the full size of the table. - # method – Compression method. Acceptable values ​: lz4 or zstd(experimental). - compression: - enabled: false - cases: - - min_part_size: "10000000000" - min_part_size_ratio: "0.01" - method: "zstd" + jobLabel: "" + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels ## - ## Contains settings that allow ClickHouse to interact with a ZooKeeper cluster. - ## ClickHouse uses ZooKeeper for storing metadata of replicas when using replicated tables. If replicated tables are not used, this section of parameters can be omitted. - # node — ZooKeeper endpoint. You can set multiple endpoints. - # session_timeout — Maximum timeout for the client session in milliseconds. - # root — The znode that is used as the root for znodes used by the ClickHouse server. Optional. - # identity — User and password, that can be required by ZooKeeper to give access to requested znodes. Optional. - zookeeper_servers: - enabled: false - session_timeout_ms: "30000" - operation_timeout_ms: "10000" - #root: "/path/to/zookeeper/node" - #identity: "user:password" - config: - - index: "" - host: "" - port: "" + honorLabels: false + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## interval: 10s ## - ## Configuration of clusters used by the Distributed table engine. - ## The parameters host, port, and optionally user, password, secure, compression are specified for each server: - # host – The address of the remote server. - # port – The TCP port for messenger activity ('tcp_port' in the config, usually set to 9000). - # user – Name of the user for connecting to a remote server. Access is configured in the users.xml file. For more information, see the section "Access rights". - # password – The password for connecting to a remote server (not masked). - # secure - Use ssl for connection, usually you also should define port = 9440. Server should listen on 9440 and have correct certificates. - # compression - Use data compression. Default value: true. - remote_servers: - enabled: true - internal_replication: false - replica: - user: "default" - #password: "" - compression: true - backup: - enabled: true + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## scrapeTimeout: 10s ## - ## Sending data to Graphite. - # interval – The interval for sending, in seconds. - # timeout – The timeout for sending data, in seconds. - # root_path – Prefix for keys. - # metrics – Sending data from a :ref:system_tables-system.metrics table. - # events – Sending data from a :ref:system_tables-system.events table. - # asynchronous_metrics – Sending data from a :ref:system_tables-system.asynchronous_metrics table. - ## You can configure multiple clauses. For instance, you can use this for sending different data at different intervals. 
- graphite: - enabled: false - config: - - timeout: "0.1" - interval: "60" - root_path: "one_min" - metrics: true - events: true - events_cumulative: true - asynchronous_metrics: true + scrapeTimeout: "" + ## @param metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics ## - ## A settings profile is a collection of settings grouped under the same name. - ## Each ClickHouse user has a profile. - ## To apply all the settings in a profile, set the profile setting. - ## More info: https://clickhouse.yandex/docs/en/operations/settings/settings_profiles/ - profiles: - enabled: false - profile: - - name: "default" - config: - max_memory_usage: "10000000000" - use_uncompressed_cache: "0" - load_balancing: "random" + metricRelabelings: [] + ## @param metrics.serviceMonitor.relabelings Specify general relabeling ## - ## The users section of the user.xml configuration file contains user settings. - ## More info: https://clickhouse.yandex/docs/en/operations/settings/settings_users/ - users: - enabled: false - user: - - name: "default" - config: - #password: "" - networks: - - "::/0" - profile: "default" - quota: "default" + relabelings: [] + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## selector: + ## prometheus: my-prometheus ## - ## Quotas allow you to limit resource usage over a period of time, or simply track the use of resources. - ## Quotas are set up in the user config. This is usually 'users.xml'. - ## More info: https://clickhouse.yandex/docs/en/operations/quotas/ - quotas: - enabled: false - quota: - - name: "default" - config: - - duration: "3600" - queries: "0" - errors: "0" - result_rows: "0" - read_rows: "0" - execution_time: "0" - -## -## Web interface for ClickHouse in the Tabix project. -## Features: -# Works with ClickHouse directly from the browser, without the need to install additional software. -# Query editor with syntax highlighting. -# Auto-completion of commands. -# Tools for graphical analysis of query execution. -# Color scheme options. -tabix: - ## - ## Enable Tabix - enabled: true - ## - ## ## The instance number of Tabix - replicas: "1" + selector: {} + + ## Prometheus Operator PrometheusRule configuration ## - ## The deployment strategy to use to replace existing pods with new ones. - updateStrategy: + prometheusRule: + ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator ## - ## Type of deployment. Can be "Recreate" or "RollingUpdate". Default is RollingUpdate. - type: "RollingUpdate" - ## - ## The maximum number of pods that can be scheduled above the desired number of pods. - maxSurge: 3 - ## - ## The maximum number of pods that can be unavailable during the update. - maxUnavailable: 1 - ## - ## Docker image name. - image: "spoonest/clickhouse-tabix-web-client" - ## - ## Docker image version - imageVersion: "stable" - ## - ## Image pull policy. One of Always, Never, IfNotPresent. - ## Defaults to Always if :latest tag is specified, or IfNotPresent otherwise. Cannot be updated. - ## More info: https://kubernetes.io/docs/concepts/containers/images#updating-images - imagePullPolicy: "IfNotPresent" - ## Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. 
- ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - livenessProbe: - enabled: true - initialDelaySeconds: "30" - periodSeconds: "30" - timeoutSeconds: "5" - failureThreshold: "3" - successThreshold: "1" - ## Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. - ## More info: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle#container-probes - readinessProbe: - enabled: true - initialDelaySeconds: "30" - periodSeconds: "30" - timeoutSeconds: "5" - failureThreshold: "3" - successThreshold: "1" - ## - ## ImagePullSecrets is an optional list of references to secrets in the same namespace to use for pulling any of the images used by this PodSpec. - ## If specified, these secrets will be passed to individual puller implementations for them to use. - ## For example, in the case of docker, only DockerConfig type secrets are honored. - ## More info: https://kubernetes.io/docs/concepts/containers/images#specifying-imagepullsecrets-on-a-pod - #imagePullSecrets: - ## - ## You can limit access to your tabix.ui application on the proxy level. - ## User and Password parameters to restrict access only for specified user. - security: - user: "admin" - password: "admin" - ## - ## You can automatically connect to a Clickhouse server by specifying chName, chHost, chHost, chPassword and/or chParams environment variables. - #automaticConnection: - # chName: "test" - # chHost: "test" - # chLogin: "test" - # chPassword: "test" - # chParams: "" - ## - ## An API object that manages external access to the services in a cluster, typically HTTP. - ## Ingress can provide load balancing, SSL termination and name-based virtual hosting. - ingress: enabled: false - # host: "tabix.domain.com" - # path: "/" - # tls: - # enabled: false - # hosts: - # - "tabix.domain.com" - # - "tabix.domain1.com" - # secretName: "tabix-secret" + ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.prometheusRule.rules PrometheusRule definitions + ## - alert: ClickhouseServerRestart + ## annotations: + ## message: Clickhouse-server started recently + ## expr: ClickHouseAsyncMetrics_Uptime > 1 < 180 + ## for: 5m + ## labels: + ## severity: warning + rules: [] + +## @section External Zookeeper paramaters +## +externalZookeeper: + ## @param externalZookeeper.servers List of external zookeeper servers to use + ## @param externalZookeeper.port Port of the Zookeeper servers + ## + servers: [] + port: 2888 + +## @section Zookeeper subchart parameters +## +## @param zookeeper.enabled Deploy Zookeeper subchart +## @param zookeeper.replicaCount Number of Zookeeper instances +## @param zookeeper.service.ports.client Zookeeper client port +## +zookeeper: + enabled: false + ## Override zookeeper default image as 3.9 is not supported https://github.com/ClickHouse/ClickHouse/issues/53749 + ## ref: https://github.com/bitnami/containers/tree/main/bitnami/zookeeper + ## @param zookeeper.image.registry [default: REGISTRY_NAME] Zookeeper image registry + ## @param zookeeper.image.repository [default: REPOSITORY_NAME/zookeeper] Zookeeper image repository + ## @skip zookeeper.image.tag Zookeeper image tag (immutable tags are recommended) + ## @param 
zookeeper.image.pullPolicy Zookeeper image pull policy + image: + registry: docker.io + repository: bitnami/zookeeper + tag: 3.8.3-debian-11-r2 + pullPolicy: IfNotPresent + replicaCount: 3 + service: + ports: + client: 2181 From 318eadb249c79bdb46e13a003f86ebf5ac4af26a Mon Sep 17 00:00:00 2001 From: an1l4 <1995anila@gmail.com> Date: Tue, 5 Dec 2023 14:24:13 +0530 Subject: [PATCH 4/6] error-handle-changes-added --- client/pkg/clickhouse/db_client.go | 90 ++++++++++-------------------- 1 file changed, 30 insertions(+), 60 deletions(-) diff --git a/client/pkg/clickhouse/db_client.go b/client/pkg/clickhouse/db_client.go index 011fc0ff..82fd4359 100644 --- a/client/pkg/clickhouse/db_client.go +++ b/client/pkg/clickhouse/db_client.go @@ -136,14 +136,12 @@ func (c *DBClient) InsertContainerEventAzure(pushEvent model.AzureContainerPushE tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertAzureContainerPushEvent)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -187,14 +185,12 @@ func (c *DBClient) InsertContainerEventAzure(pushEvent model.AzureContainerPushE func (c *DBClient) InsertContainerEventQuay(pushEvent model.QuayImagePushPayload) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertQuayContainerPushEvent)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -240,14 +236,12 @@ func (c *DBClient) InsertContainerEventQuay(pushEvent model.QuayImagePushPayload func (c *DBClient) InsertContainerEventJfrog(pushEvent model.JfrogContainerPushEventPayload) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertJfrogContainerPushEvent)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -293,13 +287,11 @@ func (c *DBClient) InsertContainerEventJfrog(pushEvent model.JfrogContainerPushE func (c *DBClient) InsertRakeesMetrics(metrics model.RakeesMetrics) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertRakees)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -325,13 +317,11 @@ func (c *DBClient) InsertRakeesMetrics(metrics model.RakeesMetrics) { func (c *DBClient) InsertKetallEvent(metrics model.Resource) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertKetall)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() 
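Note (illustrative, not part of the commit): the hunks in this patch replace `log.Printf(...)` followed by `return` with `log.Fatalf(...)` when beginning a transaction or preparing a statement fails. In Go's standard `log` package, `Fatalf` logs the message and then calls `os.Exit(1)`, so the client process terminates immediately and deferred cleanup such as `stmt.Close()` does not run. The sketch below uses a hypothetical `insertEvent` helper that only mirrors the shape of the `InsertXxx` methods in `db_client.go`; it is a minimal standalone illustration of that control flow, not code from the kubviz codebase.

```go
package main

import (
	"database/sql"
	"log"
)

// insertEvent is a hypothetical stand-in for the InsertXxx methods touched by
// this patch; names and arguments are assumptions for illustration only.
func insertEvent(conn *sql.DB, query string, args ...interface{}) {
	tx, err := conn.Begin()
	if err != nil {
		// After this patch: log.Fatalf logs and then calls os.Exit(1), so the
		// whole client exits here instead of returning from the method.
		log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err)
	}
	stmt, err := tx.Prepare(query)
	if err != nil {
		log.Fatalf("error preparing statement: %v", err)
	}
	// If either Fatalf above fires, this deferred Close never runs because the
	// process has already exited.
	defer stmt.Close()

	if _, err := stmt.Exec(args...); err != nil {
		log.Printf("error executing insert: %v", err)
	}
	if err := tx.Commit(); err != nil {
		log.Printf("error committing transaction: %v", err)
	}
}

func main() {
	// Usage sketch only: a real *sql.DB would come from the ClickHouse driver's
	// sql.Open call performed elsewhere in db_client.go.
	var conn *sql.DB
	_ = conn
	_ = insertEvent
}
```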
@@ -356,13 +346,11 @@ func (c *DBClient) InsertKetallEvent(metrics model.Resource) { func (c *DBClient) InsertOutdatedEvent(metrics model.CheckResultfinal) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertOutdated)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -389,13 +377,11 @@ func (c *DBClient) InsertOutdatedEvent(metrics model.CheckResultfinal) { func (c *DBClient) InsertDeprecatedAPI(deprecatedAPI model.DeprecatedAPI) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertDepricatedApi)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -429,13 +415,11 @@ func (c *DBClient) InsertDeprecatedAPI(deprecatedAPI model.DeprecatedAPI) { func (c *DBClient) InsertDeletedAPI(deletedAPI model.DeletedAPI) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertDeletedApi)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -470,13 +454,11 @@ func (c *DBClient) InsertDeletedAPI(deletedAPI model.DeletedAPI) { func (c *DBClient) InsertKubvizEvent(metrics model.Metrics) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertKubvizEvent)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -539,13 +521,11 @@ func (c *DBClient) InsertContainerEvent(event string) { func (c *DBClient) InsertKubeScoreMetrics(metrics model.KubeScoreRecommendations) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(InsertKubeScore) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -571,13 +551,11 @@ func (c *DBClient) InsertTrivyMetrics(metrics model.Trivy) { for _, vulnerability := range result.Vulnerabilities { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(InsertTrivyVul) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } if _, err := stmt.Exec( metrics.ID, @@ -608,13 +586,11 @@ func (c *DBClient) InsertTrivyMetrics(metrics model.Trivy) { for _, misconfiguration := range result.Misconfigurations { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - 
return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(InsertTrivyMisconfig) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() @@ -654,13 +630,11 @@ func (c *DBClient) InsertTrivyImageMetrics(metrics model.TrivyImage) { for _, vulnerability := range result.Vulnerabilities { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(InsertTrivyImage) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } if _, err := stmt.Exec( @@ -700,13 +674,11 @@ func (c *DBClient) InsertTrivySbomMetrics(metrics model.Sbom) { if result.CycloneDX != nil { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(InsertTrivySbom) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } if _, err := stmt.Exec( @@ -834,14 +806,12 @@ func (c *DBClient) RetrieveKubvizEvent() ([]model.DbEvent, error) { func (c *DBClient) InsertContainerEventDockerHub(build model.DockerHubBuild) { tx, err := c.conn.Begin() if err != nil { - log.Printf("error beginning transaction: %v", err) - return + log.Fatalf("error beginning transaction, clickhouse connection not available: %v", err) } stmt, err := tx.Prepare(string(InsertDockerHubBuild)) if err != nil { - log.Printf("error preparing statement: %v", err) - return + log.Fatalf("error preparing statement: %v", err) } defer stmt.Close() From dd490239ae6c263c27dbd29152d6ba068d5761ee Mon Sep 17 00:00:00 2001 From: Akash LM Date: Mon, 11 Dec 2023 12:49:57 +0530 Subject: [PATCH 5/6] Updated clickhouse dependency in client --- charts/clickhouse/.helmignore | 21 - charts/clickhouse/Chart.yaml | 23 - charts/clickhouse/README.md | 529 -------- charts/clickhouse/templates/NOTES.txt | 58 - charts/clickhouse/templates/_helpers.tpl | 219 ---- .../clickhouse/templates/configmap-extra.yaml | 20 - .../templates/configmap-users-extra.yaml | 20 - charts/clickhouse/templates/configmap.yaml | 20 - charts/clickhouse/templates/extra-list.yaml | 9 - .../templates/ingress-tls-secrets.yaml | 44 - charts/clickhouse/templates/ingress.yaml | 59 - .../templates/init-scripts-secret.yaml | 19 - .../clickhouse/templates/prometheusrule.yaml | 24 - .../templates/scripts-configmap.yaml | 34 - .../clickhouse/templates/service-account.yaml | 19 - .../templates/service-external-access.yaml | 155 --- .../templates/service-headless.yaml | 69 - charts/clickhouse/templates/service.yaml | 152 --- .../clickhouse/templates/servicemonitor.yaml | 47 - .../templates/start-scripts-secret.yaml | 19 - charts/clickhouse/templates/statefulset.yaml | 425 ------- charts/clickhouse/templates/tls-secret.yaml | 29 - charts/clickhouse/values.yaml | 1131 ----------------- charts/client/Chart.yaml | 6 +- .../configmap-clickhouse-datasource.yaml | 6 +- .../configmap-vertamedia-datasource.yaml | 9 +- charts/client/templates/deployment.yaml | 14 +- charts/client/values.yaml | 8 +- 28 files changed, 30 insertions(+), 3158 deletions(-) delete mode 100644 charts/clickhouse/.helmignore delete mode 100644 
charts/clickhouse/Chart.yaml delete mode 100644 charts/clickhouse/README.md delete mode 100644 charts/clickhouse/templates/NOTES.txt delete mode 100644 charts/clickhouse/templates/_helpers.tpl delete mode 100644 charts/clickhouse/templates/configmap-extra.yaml delete mode 100644 charts/clickhouse/templates/configmap-users-extra.yaml delete mode 100644 charts/clickhouse/templates/configmap.yaml delete mode 100644 charts/clickhouse/templates/extra-list.yaml delete mode 100644 charts/clickhouse/templates/ingress-tls-secrets.yaml delete mode 100644 charts/clickhouse/templates/ingress.yaml delete mode 100644 charts/clickhouse/templates/init-scripts-secret.yaml delete mode 100644 charts/clickhouse/templates/prometheusrule.yaml delete mode 100644 charts/clickhouse/templates/scripts-configmap.yaml delete mode 100644 charts/clickhouse/templates/service-account.yaml delete mode 100644 charts/clickhouse/templates/service-external-access.yaml delete mode 100644 charts/clickhouse/templates/service-headless.yaml delete mode 100644 charts/clickhouse/templates/service.yaml delete mode 100644 charts/clickhouse/templates/servicemonitor.yaml delete mode 100644 charts/clickhouse/templates/start-scripts-secret.yaml delete mode 100644 charts/clickhouse/templates/statefulset.yaml delete mode 100644 charts/clickhouse/templates/tls-secret.yaml delete mode 100644 charts/clickhouse/values.yaml diff --git a/charts/clickhouse/.helmignore b/charts/clickhouse/.helmignore deleted file mode 100644 index f0c13194..00000000 --- a/charts/clickhouse/.helmignore +++ /dev/null @@ -1,21 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.project -.idea/ -*.tmproj diff --git a/charts/clickhouse/Chart.yaml b/charts/clickhouse/Chart.yaml deleted file mode 100644 index 83ead92e..00000000 --- a/charts/clickhouse/Chart.yaml +++ /dev/null @@ -1,23 +0,0 @@ -apiVersion: v2 -appVersion: 23.10.5 -dependencies: -- name: common - repository: https://charts.bitnami.com/bitnami - tags: - - bitnami-common - version: 2.x.x -description: ClickHouse is an open-source column-oriented OLAP database management - system. Use it to boost your database performance while providing linear scalability - and hardware efficiency. -home: https://bitnami.com -icon: https://bitnami.com/assets/stacks/clickhouse/img/clickhouse-stack-220x234.png -keywords: -- database -- sharding -maintainers: -- name: VMware, Inc. - url: https://github.com/bitnami/charts -name: clickhouse -sources: -- https://github.com/bitnami/charts/tree/main/bitnami/clickhouse -version: 1.0.3 diff --git a/charts/clickhouse/README.md b/charts/clickhouse/README.md deleted file mode 100644 index ef654f7e..00000000 --- a/charts/clickhouse/README.md +++ /dev/null @@ -1,529 +0,0 @@ - - -# Bitnami package for ClickHouse - -ClickHouse is an open-source column-oriented OLAP database management system. Use it to boost your database performance while providing linear scalability and hardware efficiency. - -[Overview of ClickHouse](https://clickhouse.com/) - -Trademarks: This software listing is packaged by Bitnami. The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. 
- -## TL;DR - -```console -helm install my-release oci://registry-1.docker.io/bitnamicharts/clickhouse -``` - -Looking to use ClickHouse in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. - -## Introduction - -Bitnami charts for Helm are carefully engineered, actively maintained and are the quickest and easiest way to deploy containers on a Kubernetes cluster that are ready to handle production workloads. - -This chart bootstraps a [ClickHouse](https://github.com/clickhouse/clickhouse) Deployment in a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. - -[Learn more about the default configuration of the chart](https://docs.bitnami.com/kubernetes/infrastructure/clickhouse/get-started/). - -## Prerequisites - -- Kubernetes 1.23+ -- Helm 3.8.0+ -- PV provisioner support in the underlying infrastructure -- ReadWriteMany volumes for deployment scaling - -> If you are using Kubernetes 1.18, the following code needs to be commented out. -> seccompProfile: -> type: "RuntimeDefault" - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```console -helm install my-release oci://REGISTRY_NAME/REPOSITORY_NAME/clickhouse -``` - -> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. - -The command deploys ClickHouse on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. - -> **Tip**: List all releases using `helm list` - -## Uninstalling the Chart - -To uninstall/delete the `my-release` deployment: - -```console -helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. 
- -## Parameters - -### Global parameters - -| Name | Description | Value | -| ------------------------- | ----------------------------------------------- | ----- | -| `global.imageRegistry` | Global Docker image registry | `""` | -| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | -| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | - -### Common parameters - -| Name | Description | Value | -| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | -| `kubeVersion` | Override Kubernetes version | `""` | -| `nameOverride` | String to partially override common.names.name | `""` | -| `fullnameOverride` | String to fully override common.names.fullname | `""` | -| `namespaceOverride` | String to fully override common.names.namespace | `""` | -| `commonLabels` | Labels to add to all deployed objects | `{}` | -| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | -| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | -| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | -| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | -| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | -| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | - -### ClickHouse Parameters - -| Name | Description | Value | -| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------------------- | -| `image.registry` | ClickHouse image registry | `REGISTRY_NAME` | -| `image.repository` | ClickHouse image repository | `REPOSITORY_NAME/clickhouse` | -| `image.digest` | ClickHouse image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `image.pullPolicy` | ClickHouse image pull policy | `IfNotPresent` | -| `image.pullSecrets` | ClickHouse image pull secrets | `[]` | -| `image.debug` | Enable ClickHouse image debug mode | `false` | -| `shards` | Number of ClickHouse shards to deploy | `2` | -| `replicaCount` | Number of ClickHouse replicas per shard to deploy | `3` | -| `distributeReplicasByZone` | Schedules replicas of the same shard to different availability zones | `false` | -| `containerPorts.http` | ClickHouse HTTP container port | `8123` | -| `containerPorts.https` | ClickHouse HTTPS container port | `8443` | -| `containerPorts.tcp` | ClickHouse TCP container port | `9000` | -| `containerPorts.tcpSecure` | ClickHouse TCP (secure) container port | `9440` | -| `containerPorts.keeper` | ClickHouse keeper TCP container port | `2181` | -| `containerPorts.keeperSecure` | ClickHouse keeper TCP (secure) container port | `3181` | -| `containerPorts.keeperInter` | ClickHouse keeper interserver TCP container port | `9444` | -| `containerPorts.mysql` | ClickHouse MySQL container port | `9004` | -| `containerPorts.postgresql` | ClickHouse PostgreSQL container port | `9005` | -| `containerPorts.interserver` | ClickHouse Interserver container port | `9009` | -| `containerPorts.metrics` | ClickHouse metrics container port | `8001` | -| `livenessProbe.enabled` | Enable livenessProbe on ClickHouse containers | `true` | -| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | -| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | -| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | -| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | -| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `readinessProbe.enabled` | Enable readinessProbe on ClickHouse containers | `true` | -| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | -| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | -| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | -| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | -| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `startupProbe.enabled` | Enable startupProbe on ClickHouse containers | `false` | -| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | -| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | -| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | -| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | -| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | -| `customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | -| `customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | -| `resources.limits` | The resources limits for the ClickHouse containers | `{}` | -| `resources.requests` | The requested resources for the ClickHouse containers | `{}` | -| `podSecurityContext.enabled` | Enabled ClickHouse pods' Security Context | `true` | -| `podSecurityContext.fsGroup` | Set ClickHouse pod's Security Context fsGroup | `1001` | -| `containerSecurityContext.enabled` | Enable containers' Security 
Context | `true` | -| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `containerSecurityContext.runAsNonRoot` | Set containers' Security Context runAsNonRoot | `true` | -| `containerSecurityContext.readOnlyRootFilesystem` | Set read only root file system pod's | `false` | -| `containerSecurityContext.privileged` | Set contraller container's Security Context privileged | `false` | -| `containerSecurityContext.allowPrivilegeEscalation` | Set contraller container's Security Context allowPrivilegeEscalation | `false` | -| `containerSecurityContext.capabilities.drop` | List of capabilities to be droppedn | `["ALL"]` | -| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | -| `auth.username` | ClickHouse Admin username | `default` | -| `auth.password` | ClickHouse Admin password | `""` | -| `auth.existingSecret` | Name of a secret containing the Admin password | `""` | -| `auth.existingSecretKey` | Name of the key inside the existing secret | `""` | -| `logLevel` | Logging level | `information` | - -### ClickHouse keeper configuration parameters - -| Name | Description | Value | -| ------------------------------- | ------------------------------------------------------------------------------------------------------------------------ | ----------------------- | -| `keeper.enabled` | Deploy ClickHouse keeper. Support is experimental. | `false` | -| `defaultConfigurationOverrides` | Default configuration overrides (evaluated as a template) | `""` | -| `existingOverridesConfigmap` | The name of an existing ConfigMap with your custom configuration for ClickHouse | `""` | -| `extraOverrides` | Extra configuration overrides (evaluated as a template) apart from the default | `""` | -| `extraOverridesConfigmap` | The name of an existing ConfigMap with extra configuration for ClickHouse | `""` | -| `extraOverridesSecret` | The name of an existing ConfigMap with your custom configuration for ClickHouse | `""` | -| `usersExtraOverrides` | Users extra configuration overrides (evaluated as a template) apart from the default | `""` | -| `usersExtraOverridesConfigmap` | The name of an existing ConfigMap with users extra configuration for ClickHouse | `""` | -| `usersExtraOverridesSecret` | The name of an existing ConfigMap with your custom users configuration for ClickHouse | `""` | -| `initdbScripts` | Dictionary of initdb scripts | `{}` | -| `initdbScriptsSecret` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) | `""` | -| `startdbScripts` | Dictionary of startdb scripts | `{}` | -| `startdbScriptsSecret` | ConfigMap with the startdb scripts (Note: Overrides `startdbScripts`) | `""` | -| `command` | Override default container command (useful when using custom images) | `["/scripts/setup.sh"]` | -| `args` | Override default container args (useful when using custom images) | `[]` | -| `hostAliases` | ClickHouse pods host aliases | `[]` | -| `podLabels` | Extra labels for ClickHouse pods | `{}` | -| `podAnnotations` | Annotations for ClickHouse pods | `{}` | -| `podAffinityPreset` | Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `nodeAffinityPreset.type` | Node affinity preset type. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` | `""` | -| `nodeAffinityPreset.key` | Node label key to match. Ignored if `affinity` is set | `""` | -| `nodeAffinityPreset.values` | Node label values to match. Ignored if `affinity` is set | `[]` | -| `affinity` | Affinity for ClickHouse pods assignment | `{}` | -| `nodeSelector` | Node labels for ClickHouse pods assignment | `{}` | -| `tolerations` | Tolerations for ClickHouse pods assignment | `[]` | -| `updateStrategy.type` | ClickHouse statefulset strategy type | `RollingUpdate` | -| `podManagementPolicy` | Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join | `Parallel` | -| `priorityClassName` | ClickHouse pods' priorityClassName | `""` | -| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | -| `schedulerName` | Name of the k8s scheduler (other than default) for ClickHouse pods | `""` | -| `terminationGracePeriodSeconds` | Seconds Redmine pod needs to terminate gracefully | `""` | -| `lifecycleHooks` | for the ClickHouse container(s) to automate configuration before or after startup | `{}` | -| `extraEnvVars` | Array with extra environment variables to add to ClickHouse nodes | `[]` | -| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for ClickHouse nodes | `""` | -| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for ClickHouse nodes | `""` | -| `extraVolumes` | Optionally specify extra list of additional volumes for the ClickHouse pod(s) | `[]` | -| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the ClickHouse container(s) | `[]` | -| `sidecars` | Add additional sidecar containers to the ClickHouse pod(s) | `[]` | -| `initContainers` | Add additional init containers to the ClickHouse pod(s) | `[]` | -| `tls.enabled` | Enable TLS traffic support | `false` | -| `tls.autoGenerated` | Generate automatically self-signed TLS certificates | `false` | -| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `""` | -| `tls.certFilename` | Certificate filename | `""` | -| `tls.certKeyFilename` | Certificate key filename | `""` | -| `tls.certCAFilename` | CA Certificate filename | `""` | - -### Traffic Exposure Parameters - -| Name | Description | Value | -| ------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | -| `service.type` | ClickHouse service type | `ClusterIP` | -| `service.ports.http` | ClickHouse service HTTP port | `8123` | -| `service.ports.https` | ClickHouse service HTTPS port | `443` | -| `service.ports.tcp` | ClickHouse service TCP port | `9000` | -| `service.ports.tcpSecure` | ClickHouse service TCP (secure) port | `9440` | -| `service.ports.keeper` | ClickHouse keeper TCP container port | `2181` | -| `service.ports.keeperSecure` | ClickHouse keeper TCP (secure) container port | `3181` | -| `service.ports.keeperInter` | ClickHouse keeper interserver TCP container port | `9444` | -| `service.ports.mysql` | ClickHouse service MySQL port | `9004` | -| `service.ports.postgresql` | ClickHouse service PostgreSQL port | `9005` | -| `service.ports.interserver` | ClickHouse service Interserver port | `9009` | -| `service.ports.metrics` | ClickHouse service metrics port | `8001` | -| `service.nodePorts.http` | Node port for HTTP 
| `""` | -| `service.nodePorts.https` | Node port for HTTPS | `""` | -| `service.nodePorts.tcp` | Node port for TCP | `""` | -| `service.nodePorts.tcpSecure` | Node port for TCP (with TLS) | `""` | -| `service.nodePorts.keeper` | ClickHouse keeper TCP container port | `""` | -| `service.nodePorts.keeperSecure` | ClickHouse keeper TCP (secure) container port | `""` | -| `service.nodePorts.keeperInter` | ClickHouse keeper interserver TCP container port | `""` | -| `service.nodePorts.mysql` | Node port for MySQL | `""` | -| `service.nodePorts.postgresql` | Node port for PostgreSQL | `""` | -| `service.nodePorts.interserver` | Node port for Interserver | `""` | -| `service.nodePorts.metrics` | Node port for metrics | `""` | -| `service.clusterIP` | ClickHouse service Cluster IP | `""` | -| `service.loadBalancerIP` | ClickHouse service Load Balancer IP | `""` | -| `service.loadBalancerSourceRanges` | ClickHouse service Load Balancer sources | `[]` | -| `service.externalTrafficPolicy` | ClickHouse service external traffic policy | `Cluster` | -| `service.annotations` | Additional custom annotations for ClickHouse service | `{}` | -| `service.extraPorts` | Extra ports to expose in ClickHouse service (normally used with the `sidecars` value) | `[]` | -| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | -| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | -| `service.headless.annotations` | Annotations for the headless service. | `{}` | -| `externalAccess.enabled` | Enable Kubernetes external cluster access to ClickHouse | `false` | -| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP | `LoadBalancer` | -| `externalAccess.service.ports.http` | ClickHouse service HTTP port | `80` | -| `externalAccess.service.ports.https` | ClickHouse service HTTPS port | `443` | -| `externalAccess.service.ports.tcp` | ClickHouse service TCP port | `9000` | -| `externalAccess.service.ports.tcpSecure` | ClickHouse service TCP (secure) port | `9440` | -| `externalAccess.service.ports.keeper` | ClickHouse keeper TCP container port | `2181` | -| `externalAccess.service.ports.keeperSecure` | ClickHouse keeper TCP (secure) container port | `3181` | -| `externalAccess.service.ports.keeperInter` | ClickHouse keeper interserver TCP container port | `9444` | -| `externalAccess.service.ports.mysql` | ClickHouse service MySQL port | `9004` | -| `externalAccess.service.ports.postgresql` | ClickHouse service PostgreSQL port | `9005` | -| `externalAccess.service.ports.interserver` | ClickHouse service Interserver port | `9009` | -| `externalAccess.service.ports.metrics` | ClickHouse service metrics port | `8001` | -| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for each ClickHouse . Length must be the same as replicaCount | `[]` | -| `externalAccess.service.loadBalancerAnnotations` | Array of load balancer annotations for each ClickHouse . 
Length must be the same as shards multiplied by replicaCount | `[]` | -| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | -| `externalAccess.service.nodePorts.http` | Node port for HTTP | `[]` | -| `externalAccess.service.nodePorts.https` | Node port for HTTPS | `[]` | -| `externalAccess.service.nodePorts.tcp` | Node port for TCP | `[]` | -| `externalAccess.service.nodePorts.tcpSecure` | Node port for TCP (with TLS) | `[]` | -| `externalAccess.service.nodePorts.keeper` | ClickHouse keeper TCP container port | `[]` | -| `externalAccess.service.nodePorts.keeperSecure` | ClickHouse keeper TCP container port (with TLS) | `[]` | -| `externalAccess.service.nodePorts.keeperInter` | ClickHouse keeper interserver TCP container port | `[]` | -| `externalAccess.service.nodePorts.mysql` | Node port for MySQL | `[]` | -| `externalAccess.service.nodePorts.postgresql` | Node port for PostgreSQL | `[]` | -| `externalAccess.service.nodePorts.interserver` | Node port for Interserver | `[]` | -| `externalAccess.service.nodePorts.metrics` | Node port for metrics | `[]` | -| `externalAccess.service.labels` | Service labels for external access | `{}` | -| `externalAccess.service.annotations` | Service annotations for external access | `{}` | -| `externalAccess.service.extraPorts` | Extra ports to expose in the ClickHouse external service | `[]` | -| `ingress.enabled` | Enable ingress record generation for ClickHouse | `false` | -| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | -| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | -| `ingress.hostname` | Default host for the ingress record | `clickhouse.local` | -| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | -| `ingress.path` | Default path for the ingress record | `/` | -| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
| `{}` | -| `ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` | -| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | -| `ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | -| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | -| `ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | -| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | -| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | - -### Persistence Parameters - -| Name | Description | Value | -| --------------------------- | ----------------------------------------------------------------------- | ------------------- | -| `persistence.enabled` | Enable persistence using Persistent Volume Claims | `true` | -| `persistence.existingClaim` | Name of an existing PVC to use | `""` | -| `persistence.storageClass` | Storage class of backing PVC | `""` | -| `persistence.labels` | Persistent Volume Claim labels | `{}` | -| `persistence.annotations` | Persistent Volume Claim annotations | `{}` | -| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | -| `persistence.size` | Size of data volume | `8Gi` | -| `persistence.selector` | Selector to match an existing Persistent Volume for ClickHouse data PVC | `{}` | -| `persistence.dataSource` | Custom PVC data source | `{}` | - -### Init Container Parameters - -| Name | Description | Value | -| ------------------------------------------------------ | ----------------------------------------------------------------------------------------------- | -------------------------- | -| `volumePermissions.enabled` | Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup` | `false` | -| `volumePermissions.image.registry` | OS Shell + Utility image registry | `REGISTRY_NAME` | -| `volumePermissions.image.repository` | OS Shell + Utility image repository | `REPOSITORY_NAME/os-shell` | -| `volumePermissions.image.pullPolicy` | OS Shell + Utility image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | OS Shell + Utility image pull secrets | `[]` | -| `volumePermissions.resources.limits` | The resources limits for the init container | `{}` | -| `volumePermissions.resources.requests` | The requested resources for the init container | `{}` | -| `volumePermissions.containerSecurityContext.runAsUser` | Set init container's Security Context runAsUser | `0` | - -### Other Parameters - -| Name | Description | Value | -| --------------------------------------------- | ------------------------------------------------------------------------------------------------------ | ------- | -| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | -| `serviceAccount.name` | The name of the ServiceAccount to use. 
| `""` | -| `serviceAccount.annotations` | Additional Service Account annotations (evaluated as a template) | `{}` | -| `serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `true` | -| `metrics.enabled` | Enable the export of Prometheus metrics | `false` | -| `metrics.podAnnotations` | Annotations for metrics scraping | `{}` | -| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | -| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | -| `metrics.serviceMonitor.annotations` | Additional custom annotations for the ServiceMonitor | `{}` | -| `metrics.serviceMonitor.labels` | Extra labels for the ServiceMonitor | `{}` | -| `metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in Prometheus | `""` | -| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.metricRelabelings` | Specify additional relabeling of metrics | `[]` | -| `metrics.serviceMonitor.relabelings` | Specify general relabeling | `[]` | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `metrics.prometheusRule.enabled` | Create a PrometheusRule for Prometheus Operator | `false` | -| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource (defaults to the Release Namespace) | `""` | -| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so PrometheusRule will be discovered by Prometheus | `{}` | -| `metrics.prometheusRule.rules` | PrometheusRule definitions | `[]` | - -### External Zookeeper paramaters - -| Name | Description | Value | -| --------------------------- | ----------------------------------------- | ------ | -| `externalZookeeper.servers` | List of external zookeeper servers to use | `[]` | -| `externalZookeeper.port` | Port of the Zookeeper servers | `2888` | - -### Zookeeper subchart parameters - -| Name | Description | Value | -| -------------------------------- | ----------------------------- | --------------------------- | -| `zookeeper.enabled` | Deploy Zookeeper subchart | `true` | -| `zookeeper.replicaCount` | Number of Zookeeper instances | `3` | -| `zookeeper.service.ports.client` | Zookeeper client port | `2181` | -| `zookeeper.image.registry` | Zookeeper image registry | `REGISTRY_NAME` | -| `zookeeper.image.repository` | Zookeeper image repository | `REPOSITORY_NAME/zookeeper` | -| `zookeeper.image.pullPolicy` | Zookeeper image pull policy | `IfNotPresent` | - -See to create the table. - -The above parameters map to the env variables defined in [bitnami/clickhouse](https://github.com/bitnami/containers/tree/main/bitnami/clickhouse). For more information please refer to the [bitnami/clickhouse](https://github.com/bitnami/containers/tree/main/bitnami/clickhouse) image documentation. - -Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. 
For example, - -```console -helm install my-release \ - --set auth.username=admin \ - --set auth.password=password \ - oci://REGISTRY_NAME/REPOSITORY_NAME/clickhouse -``` - -> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. - -The above command sets the ClickHouse administrator account username and password to `admin` and `password` respectively. - -> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. - -Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, - -```console -helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/clickhouse -``` - -> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. -> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/clickhouse/values.yaml) - -## Configuration and installation details - -### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) - -It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. - -Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. - -### ClickHouse keeper support - -You can set `keeper.enabled` to use ClickHouse keeper. If `keeper.enabled=true`, Zookeeper settings will not be ignore. - -### External Zookeeper support - -You may want to have ClickHouse connect to an external zookeeper rather than installing one inside your cluster. Typical reasons for this are to use a managed database service, or to share a common database server for all your applications. To achieve this, the chart allows you to specify credentials for an external database with the [`externalZookeeper` parameter](#parameters). You should also disable the Zookeeper installation with the `zookeeper.enabled` option. Here is an example: - -```console -zookeper.enabled=false -externalZookeeper.host=myexternalhost -externalZookeeper.user=myuser -externalZookeeper.password=mypassword -externalZookeeper.database=mydatabase -externalZookeeper.port=3306 -``` - -### TLS secrets - -The chart also facilitates the creation of TLS secrets for use with the Ingress controller, with different options for certificate management. [Learn more about TLS secrets](https://docs.bitnami.com/kubernetes/infrastructure/clickhouse/administration/enable-tls-ingress/)). - -## Persistence - -The [Bitnami ClickHouse](https://github.com/bitnami/containers/tree/main/bitnami/clickhouse) image stores the ClickHouse data and configurations at the `/bitnami` path of the container. Persistent Volume Claims are used to keep the data across deployments. 
This is known to work in GCE, AWS, and minikube. - -### Additional environment variables - -In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property. - -```yaml -clickhouse: - extraEnvVars: - - name: LOG_LEVEL - value: error -``` - -Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values. - -### Sidecars - -If additional containers are needed in the same pod as ClickHouse (such as additional metrics or logging exporters), they can be defined using the `sidecars` parameter. If these sidecars export extra ports, extra port definitions can be added using the `service.extraPorts` parameter. [Learn more about configuring and using sidecar containers](https://docs.bitnami.com/kubernetes/infrastructure/clickhouse/configuration/configure-sidecar-init-containers/). - -### Ingress without TLS - -For using ingress (example without TLS): - -```yaml -ingress: - ## If true, ClickHouse server Ingress will be created - ## - enabled: true - - ## ClickHouse server Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## ClickHouse server Ingress hostnames - ## Must be provided if Ingress is enabled - ## - hosts: - - clickhouse.domain.com -``` - -### Ingress TLS - -If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [kube-lego](https://github.com/jetstack/kube-lego)), please refer to the documentation for that mechanism. - -To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret (named `clickhouse-server-tls` in this example) in the namespace. Include the secret's name, along with the desired hostnames, in the Ingress TLS section of your custom `values.yaml` file: - -```yaml -ingress: - ## If true, ClickHouse server Ingress will be created - ## - enabled: true - - ## ClickHouse server Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## ClickHouse server Ingress hostnames - ## Must be provided if Ingress is enabled - ## - hosts: - - clickhouse.domain.com - - ## ClickHouse server Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: - - secretName: clickhouse-server-tls - hosts: - - clickhouse.domain.com -``` - -### Using custom scripts - -For advanced operations, the Bitnami ClickHouse chart allows using custom init and start scripts that will be mounted in `/docker-entrypoint.initdb.d` and `/docker-entrypoint.startdb.d` . The `init` scripts will be run on the first boot whereas the `start` scripts will be run on every container start. For adding the scripts directly as values use the `initdbScripts` and `startdbScripts` values. For using Secrets use the `initdbScriptsSecret` and `startdbScriptsSecret`. - -```yaml -initdbScriptsSecret: init-scripts-secret -startdbScriptsSecret: start-scripts-secret -``` - -### Pod affinity - -This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). 
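-
-For illustration only, here is a minimal sketch of a custom `affinity` block in `values.yaml`; the release name `my-release` and the hard anti-affinity rule are assumptions for the example, not chart defaults, and the label keys follow the chart's standard labels. Note that `podAffinityPreset`, `podAntiAffinityPreset` and `nodeAffinityPreset` are ignored once `affinity` is set:
-
-```yaml
-affinity:
-  podAntiAffinity:
-    # Never co-schedule two ClickHouse pods of this release on the same node
-    requiredDuringSchedulingIgnoredDuringExecution:
-      - labelSelector:
-          matchLabels:
-            app.kubernetes.io/name: clickhouse
-            app.kubernetes.io/instance: my-release
-        topologyKey: kubernetes.io/hostname
-```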
- -As an alternative, use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters. - -## Troubleshooting - -Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). - -## Upgrading - -### To 2.0.0 - -This major updates the Zookeeper subchart to it newest major, 11.0.0. For more information on this subchart's major, please refer to [zookeeper upgrade notes](https://github.com/bitnami/charts/tree/main/bitnami/zookeeper#to-1100). - -## License - -Copyright © 2023 VMware, Inc. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. \ No newline at end of file diff --git a/charts/clickhouse/templates/NOTES.txt b/charts/clickhouse/templates/NOTES.txt deleted file mode 100644 index 4bb61dab..00000000 --- a/charts/clickhouse/templates/NOTES.txt +++ /dev/null @@ -1,58 +0,0 @@ -CHART NAME: {{ .Chart.Name }} -CHART VERSION: {{ .Chart.Version }} -APP VERSION: {{ .Chart.AppVersion }} - -** Please be patient while the chart is being deployed ** - -{{- if .Values.diagnosticMode.enabled }} -The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: - - command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} - args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }} - -Get the list of pods by executing: - - kubectl get pods --namespace {{ include "common.names.namespace" . | quote }} -l app.kubernetes.io/instance={{ .Release.Name }} - -Access the pod you want to debug by executing - - kubectl exec --namespace {{ include "common.names.namespace" . | quote }} -ti -- bash - -In order to replicate the container startup scripts execute this command: - - /opt/bitnami/scripts/clickhouse/entrypoint.sh /opt/bitnami/scripts/clickhouse/run.sh - -{{- else }} - -ClickHouse is available in the following address: - -{{- if .Values.externalAccess.enabled }} - -NOTE: It may take a few minutes for the LoadBalancer IP to be available. - - kubectl get svc --namespace {{ template "common.names.namespace" . }} -l "app.kubernetes.io/name={{ template "common.names.fullname" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=clickhouse" -w - -{{- else if (eq "LoadBalancer" .Values.service.type) }} - - export SERVICE_IP=$(kubectl get svc --namespace {{ template "common.names.namespace" . }} {{ template "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . }}{{ end }}" }}") - -{{- else if (eq "NodePort" .Values.service.type)}} - - export NODE_IP=$(kubectl get nodes --namespace {{ template "common.names.namespace" . 
}} -o jsonpath="{.items[0].status.addresses[0].address}") - export NODE_PORT=$(kubectl get --namespace {{ template "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "common.names.fullname" . }}) - -{{- else if (eq "ClusterIP" .Values.service.type)}} - - kubectl port-forward --namespace {{ template "common.names.namespace" . }} svc/{{ template "common.names.fullname" . }} {{ .Values.service.ports.tcp }}:9000 & - -{{- end }} - -Credentials: - - echo "Username : {{ .Values.auth.username }}" - echo "Password : $(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "clickhouse.secretName" . }} -o jsonpath="{.data.{{ include "clickhouse.secretKey" .}}}" | base64 -d)" - -{{- end }} - -{{- include "common.warnings.rollingTag" .Values.image }} -{{- include "clickhouse.validateValues" . }} diff --git a/charts/clickhouse/templates/_helpers.tpl b/charts/clickhouse/templates/_helpers.tpl deleted file mode 100644 index b5243526..00000000 --- a/charts/clickhouse/templates/_helpers.tpl +++ /dev/null @@ -1,219 +0,0 @@ -{{/* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{/* -Return the proper ClickHouse image name -*/}} -{{- define "clickhouse.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} -{{- end -}} - -{{/* -Return the proper image name (for the init container volume-permissions image) -*/}} -{{- define "clickhouse.volumePermissions.image" -}} -{{- include "common.images.image" ( dict "imageRoot" .Values.volumePermissions.image "global" .Values.global ) -}} -{{- end -}} - -{{/* -Return the proper Docker Image Registry Secret Names -*/}} -{{- define "clickhouse.imagePullSecrets" -}} -{{- include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image) "global" .Values.global) -}} -{{- end -}} - -{{/* -Return true if a TLS credentials secret object should be created -*/}} -{{- define "clickhouse.createTlsSecret" -}} -{{- if and .Values.tls.autoGenerated (not .Values.tls.certificatesSecret) }} - {{- true -}} -{{- end -}} -{{- end -}} - -{{/* -Return the path to the CA cert file. -*/}} -{{- define "clickhouse.tlsSecretName" -}} -{{- if .Values.tls.autoGenerated }} - {{- printf "%s-crt" (include "common.names.fullname" .) -}} -{{- else -}} - {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} -{{- end -}} -{{- end -}} - -{{/* -Return the path to the cert file. -*/}} -{{- define "clickhouse.tlsCert" -}} -{{- if .Values.tls.autoGenerated }} - {{- printf "/opt/bitnami/clickhouse/certs/tls.crt" -}} -{{- else -}} - {{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/clickhouse/certs/%s" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the path to the cert key file. -*/}} -{{- define "clickhouse.tlsCertKey" -}} -{{- if .Values.tls.autoGenerated }} - {{- printf "/opt/bitnami/clickhouse/certs/tls.key" -}} -{{- else -}} -{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/clickhouse/certs/%s" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the path to the CA cert file. 
-*/}} -{{- define "clickhouse.tlsCACert" -}} -{{- if .Values.tls.autoGenerated }} - {{- printf "/opt/bitnami/clickhouse/certs/ca.crt" -}} -{{- else -}} - {{- printf "/opt/bitnami/clickhouse/certs/%s" .Values.tls.certCAFilename -}} -{{- end -}} -{{- end -}} - -{{/* -Get the ClickHouse configuration configmap. -*/}} -{{- define "clickhouse.configmapName" -}} -{{- if .Values.existingOverridesConfigmap -}} - {{- .Values.existingOverridesConfigmap -}} -{{- else }} - {{- printf "%s" (include "common.names.fullname" . ) -}} -{{- end -}} -{{- end -}} - -{{/* -Get the ClickHouse configuration configmap. -*/}} -{{- define "clickhouse.extraConfigmapName" -}} -{{- if .Values.extraOverridesConfigmap -}} - {{- .Values.extraOverridesConfigmap -}} -{{- else }} - {{- printf "%s-extra" (include "common.names.fullname" . ) -}} -{{- end -}} -{{- end -}} - - -{{/* -Get the ClickHouse configuration users configmap. -*/}} -{{- define "clickhouse.usersExtraConfigmapName" -}} -{{- if .Values.usersExtraOverridesConfigmap -}} - {{- .Values.usersExtraOverridesConfigmap -}} -{{- else }} - {{- printf "%s-users-extra" (include "common.names.fullname" . ) -}} -{{- end -}} -{{- end -}} - -{{/* -Get the Clickhouse password secret name -*/}} -{{- define "clickhouse.secretName" -}} -{{- if .Values.auth.existingSecret -}} - {{- .Values.auth.existingSecret -}} -{{- else }} - {{- printf "%s" (include "common.names.fullname" . ) -}} -{{- end -}} -{{- end -}} - -{{/* -Get the ClickHouse password key inside the secret -*/}} -{{- define "clickhouse.secretKey" -}} -{{- if .Values.auth.existingSecret -}} - {{- .Values.auth.existingSecretKey -}} -{{- else }} - {{- print "admin-password" -}} -{{- end -}} -{{- end -}} - -{{/* -Get the startialization scripts Secret name. -*/}} -{{- define "clickhouse.startdbScriptsSecret" -}} -{{- if .Values.startdbScriptsSecret -}} - {{- printf "%s" (tpl .Values.startdbScriptsSecret $) -}} -{{- else -}} - {{- printf "%s-start-scripts" (include "common.names.fullname" .) -}} -{{- end -}} -{{- end -}} - -{{/* -Get the initialization scripts Secret name. -*/}} -{{- define "clickhouse.initdbScriptsSecret" -}} -{{- if .Values.initdbScriptsSecret -}} - {{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} -{{- else -}} - {{- printf "%s-init-scripts" (include "common.names.fullname" .) -}} -{{- end -}} -{{- end -}} - -{{/* -Return the path to the CA cert file. -*/}} -{{- define "clickhouse.headlessServiceName" -}} -{{- printf "%s-headless" (include "common.names.fullname" .) -}} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "clickhouse.zookeeper.fullname" -}} -{{- include "common.names.dependency.fullname" (dict "chartName" "zookeeper" "chartValues" .Values.zookeeper "context" $) -}} -{{- end -}} - -{{/* -Return the path to the CA cert file. -*/}} -{{- define "clickhouse.zookeeper.headlessServiceName" -}} -{{- printf "%s-headless" (include "clickhouse.zookeeper.fullname" .) -}} -{{- end -}} - -{{/* -Create the name of the service account to use -*/}} -{{- define "clickhouse.serviceAccountName" -}} -{{- if .Values.serviceAccount.create -}} - {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} -{{- else -}} - {{ default "default" .Values.serviceAccount.name }} -{{- end -}} -{{- end -}} - -{{/* -Compile all warnings into a single message. 
-*/}} -{{- define "clickhouse.validateValues" -}} -{{- $messages := list -}} -{{- $messages := append $messages (include "clickhouse.validateValues.zookeeper" .) -}} -{{- $messages := without $messages "" -}} -{{- $message := join "\n" $messages -}} - -{{- if $message -}} -{{- printf "\nVALUES VALIDATION:\n%s" $message -}} -{{- end -}} -{{- end -}} - -{{/* Validate values of ClickHouse - [Zoo]keeper */}} -{{- define "clickhouse.validateValues.zookeeper" -}} -{{- if or (and .Values.keeper.enabled .Values.zookeeper.enabled) (and .Values.keeper.enabled .Values.externalZookeeper.servers) (and .Values.zookeeper.enabled .Values.externalZookeeper.servers) -}} -clickhouse: Multiple [Zoo]keeper - You can only use one [zoo]keeper - Please choose use ClickHouse keeper or - installing a Zookeeper chart (--set zookeeper.enabled=true) or - using an external instance (--set zookeeper.servers ) -{{- end -}} -{{- if and (not .Values.keeper.enabled) (not .Values.zookeeper.enabled) (not .Values.externalZookeeper.servers) (ne (int .Values.shards) 1) (ne (int .Values.replicaCount) 1) -}} -clickhouse: No [Zoo]keeper - If you are deploying more than one ClickHouse instance, you need to enable [Zoo]keeper. Please choose installing a [Zoo]keeper (--set keeper.enabled=true) or (--set zookeeper.enabled=true) or - using an external instance (--set zookeeper.servers ) -{{- end -}} -{{- end -}} diff --git a/charts/clickhouse/templates/configmap-extra.yaml b/charts/clickhouse/templates/configmap-extra.yaml deleted file mode 100644 index 153cf4d5..00000000 --- a/charts/clickhouse/templates/configmap-extra.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if and .Values.extraOverrides (not .Values.extraOverridesConfigmap) }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ printf "%s-extra" (include "common.names.fullname" .) }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: - 01_extra_overrides.xml: | - {{- include "common.tplvalues.render" (dict "value" .Values.extraOverrides "context" $) | nindent 4 }} -{{- end }} diff --git a/charts/clickhouse/templates/configmap-users-extra.yaml b/charts/clickhouse/templates/configmap-users-extra.yaml deleted file mode 100644 index 056d2d02..00000000 --- a/charts/clickhouse/templates/configmap-users-extra.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if and .Values.usersExtraOverrides (not .Values.usersExtraOverridesConfigmap) }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ printf "%s-users-extra" (include "common.names.fullname" .) }} - namespace: {{ include "common.names.namespace" . 
| quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: - 01_users_extra_overrides.xml: | - {{- include "common.tplvalues.render" (dict "value" .Values.usersExtraOverrides "context" $) | nindent 4 }} -{{- end }} diff --git a/charts/clickhouse/templates/configmap.yaml b/charts/clickhouse/templates/configmap.yaml deleted file mode 100644 index 2462712b..00000000 --- a/charts/clickhouse/templates/configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if not .Values.existingOverridesConfigmap }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ template "common.names.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: - 00_default_overrides.xml: | - {{- include "common.tplvalues.render" (dict "value" .Values.defaultConfigurationOverrides "context" $) | nindent 4 }} -{{- end }} diff --git a/charts/clickhouse/templates/extra-list.yaml b/charts/clickhouse/templates/extra-list.yaml deleted file mode 100644 index 2d35a580..00000000 --- a/charts/clickhouse/templates/extra-list.yaml +++ /dev/null @@ -1,9 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- range .Values.extraDeploy }} ---- -{{ include "common.tplvalues.render" (dict "value" . "context" $) }} -{{- end }} diff --git a/charts/clickhouse/templates/ingress-tls-secrets.yaml b/charts/clickhouse/templates/ingress-tls-secrets.yaml deleted file mode 100644 index 6ef20e36..00000000 --- a/charts/clickhouse/templates/ingress-tls-secrets.yaml +++ /dev/null @@ -1,44 +0,0 @@ -{{- /* -Copyright VMware, Inc. 
-SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.ingress.enabled }} -{{- if .Values.ingress.secrets }} -{{- range .Values.ingress.secrets }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ .name }} - namespace: {{ $.Release.Namespace | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} - {{- if $.Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -type: kubernetes.io/tls -data: - tls.crt: {{ .certificate | b64enc }} - tls.key: {{ .key | b64enc }} ---- -{{- end }} -{{- end }} -{{- if and .Values.ingress.tls .Values.ingress.selfSigned }} -{{- $secretName := printf "%s-tls" .Values.ingress.hostname }} -{{- $ca := genCA "clickhouse-ca" 365 }} -{{- $cert := genSignedCert .Values.ingress.hostname nil (list .Values.ingress.hostname) 365 $ca }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ $secretName }} - namespace: {{ .Release.Namespace | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -type: kubernetes.io/tls -data: - tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} - tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} - ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} -{{- end }} -{{- end }} diff --git a/charts/clickhouse/templates/ingress.yaml b/charts/clickhouse/templates/ingress.yaml deleted file mode 100644 index 7000eceb..00000000 --- a/charts/clickhouse/templates/ingress.yaml +++ /dev/null @@ -1,59 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.ingress.enabled }} -apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} -kind: Ingress -metadata: - name: {{ include "common.names.fullname" . }} - namespace: {{ .Release.Namespace | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- if or .Values.ingress.annotations .Values.commonAnnotations }} - {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.ingress.annotations .Values.commonAnnotations ) "context" . ) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} - {{- end }} -spec: - {{- if and .Values.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} - ingressClassName: {{ .Values.ingress.ingressClassName | quote }} - {{- end }} - rules: - {{- if .Values.ingress.hostname }} - - host: {{ .Values.ingress.hostname | quote }} - http: - paths: - {{- if .Values.ingress.extraPaths }} - {{- toYaml .Values.ingress.extraPaths | nindent 10 }} - {{- end }} - - path: {{ .Values.ingress.path }} - {{- if eq "true" (include "common.ingress.supportsPathType" .) }} - pathType: {{ .Values.ingress.pathType }} - {{- end }} - backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" .) 
"servicePort" "http" "context" $) | nindent 14 }} - {{- end }} - {{- range .Values.ingress.extraHosts }} - - host: {{ .name | quote }} - http: - paths: - - path: {{ default "/" .path }} - {{- if eq "true" (include "common.ingress.supportsPathType" $) }} - pathType: {{ default "ImplementationSpecific" .pathType }} - {{- end }} - backend: {{- include "common.ingress.backend" (dict "serviceName" (include "common.names.fullname" $) "servicePort" "http" "context" $) | nindent 14 }} - {{- end }} - {{- if .Values.ingress.extraRules }} - {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraRules "context" $) | nindent 4 }} - {{- end }} - {{- if or (and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned)) .Values.ingress.extraTls }} - tls: - {{- if and .Values.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.ingress.annotations )) .Values.ingress.selfSigned) }} - - hosts: - - {{ .Values.ingress.hostname | quote }} - secretName: {{ printf "%s-tls" .Values.ingress.hostname }} - {{- end }} - {{- if .Values.ingress.extraTls }} - {{- include "common.tplvalues.render" (dict "value" .Values.ingress.extraTls "context" $) | nindent 4 }} - {{- end }} - {{- end }} -{{- end }} diff --git a/charts/clickhouse/templates/init-scripts-secret.yaml b/charts/clickhouse/templates/init-scripts-secret.yaml deleted file mode 100644 index 32367093..00000000 --- a/charts/clickhouse/templates/init-scripts-secret.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if and .Values.initdbScripts (not .Values.initdbScriptsSecret) }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ printf "%s-init-scripts" (include "common.names.fullname" .) }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -stringData: -{{- include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }} -{{- end }} diff --git a/charts/clickhouse/templates/prometheusrule.yaml b/charts/clickhouse/templates/prometheusrule.yaml deleted file mode 100644 index dc2d05d3..00000000 --- a/charts/clickhouse/templates/prometheusrule.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} -apiVersion: monitoring.coreos.com/v1 -kind: PrometheusRule -metadata: - name: {{ include "common.names.fullname" . 
}} - namespace: {{ default .Release.Namespace .Values.metrics.prometheusRule.namespace | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: metrics - {{- if .Values.metrics.prometheusRule.additionalLabels }} - {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.prometheusRule.additionalLabels "context" $ ) | nindent 4 }} - {{- end }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - groups: - - name: {{ include "common.names.fullname" . }} - rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 8 }} -{{- end }} diff --git a/charts/clickhouse/templates/scripts-configmap.yaml b/charts/clickhouse/templates/scripts-configmap.yaml deleted file mode 100644 index 86aa34dc..00000000 --- a/charts/clickhouse/templates/scripts-configmap.yaml +++ /dev/null @@ -1,34 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ printf "%s-scripts" (include "common.names.fullname" .) }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: - setup.sh: |- - #!/bin/bash - - # Execute entrypoint as usual after obtaining KEEPER_SERVER_ID - # check KEEPER_SERVER_ID in persistent volume via myid - # if not present, set based on POD hostname - if [[ -f "/bitnami/clickhouse/keeper/data/myid" ]]; then - export KEEPER_SERVER_ID="$(cat /bitnami/clickhouse/keeper/data/myid)" - else - HOSTNAME="$(hostname -s)" - if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then - export KEEPER_SERVER_ID=${BASH_REMATCH[2]} - else - echo "Failed to get index from hostname $HOST" - exit 1 - fi - fi - exec /opt/bitnami/scripts/clickhouse/entrypoint.sh /opt/bitnami/scripts/clickhouse/run.sh -- --listen_host=0.0.0.0 diff --git a/charts/clickhouse/templates/service-account.yaml b/charts/clickhouse/templates/service-account.yaml deleted file mode 100644 index 649086da..00000000 --- a/charts/clickhouse/templates/service-account.yaml +++ /dev/null @@ -1,19 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - name: {{ include "clickhouse.serviceAccountName" . }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - {{- if or .Values.serviceAccount.annotations .Values.commonAnnotations }} - {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.serviceAccount.annotations .Values.commonAnnotations ) "context" . 
) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} - {{- end }} -automountServiceAccountToken: {{ .Values.serviceAccount.automountServiceAccountToken }} -{{- end }} diff --git a/charts/clickhouse/templates/service-external-access.yaml b/charts/clickhouse/templates/service-external-access.yaml deleted file mode 100644 index f50baa21..00000000 --- a/charts/clickhouse/templates/service-external-access.yaml +++ /dev/null @@ -1,155 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if $.Values.externalAccess.enabled }} -{{- $shards := $.Values.shards | int }} -{{- $replicas := $.Values.replicaCount | int }} -{{- $totalNodes := mul $shards $replicas }} -{{- range $shard, $e := until $shards }} -{{- range $i, $_e := until $replicas }} -{{- $loadBalancerAnnotationPosOffset := mul $shard $replicas }} -{{- $loadBalancerAnnotationPosition := add $loadBalancerAnnotationPosOffset $i }} -{{- $targetPod := printf "%s-shard%d-%d" (include "common.names.fullname" $) $shard $i }} -apiVersion: v1 -kind: Service -metadata: - name: {{ printf "%s-external" $targetPod | trunc 63 | trimSuffix "-" }} - namespace: {{ $.Release.Namespace | quote }} - {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.externalAccess.service.labels $.Values.commonLabels ) "context" $ ) }} - labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - pod: {{ $targetPod }} - {{- if or $.Values.externalAccess.service.annotations $.Values.commonAnnotations $.Values.externalAccess.service.loadBalancerAnnotations }} - annotations: - {{- if and (not (empty $.Values.externalAccess.service.loadBalancerAnnotations)) (eq (len $.Values.externalAccess.service.loadBalancerAnnotations) $totalNodes) }} - {{ include "common.tplvalues.render" ( dict "value" (index $.Values.externalAccess.service.loadBalancerAnnotations $loadBalancerAnnotationPosition) "context" $) | nindent 4 }} - {{- end }} - {{- if $.Values.externalAccess.service.annotations }} - {{- include "common.tplvalues.render" ( dict "value" $.Values.externalAccess.service.annotations "context" $) | nindent 4 }} - {{- end }} - {{- if $.Values.commonAnnotations }} - {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} - {{- end }} -spec: - type: {{ $.Values.externalAccess.service.type }} - {{- if eq $.Values.externalAccess.service.type "LoadBalancer" }} - {{- if and (not (empty $.Values.externalAccess.service.loadBalancerIPs)) (eq (len $.Values.externalAccess.service.loadBalancerIPs) $totalNodes) }} - loadBalancerIP: {{ index $.Values.externalAccess.service.loadBalancerIPs $i }} - {{- end }} - {{- if $.Values.externalAccess.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: {{- toYaml $.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} - {{- end }} - {{- end }} - ports: - - name: http - port: {{ $.Values.externalAccess.service.ports.http }} - targetPort: http - {{- if not (empty $.Values.externalAccess.service.nodePorts.http) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.http $i }} - {{- else }} - nodePort: null - {{- end }} - {{- if $.Values.tls.enabled }} - - name: https - port: {{ $.Values.externalAccess.service.ports.https }} - targetPort: https - {{- if not (empty $.Values.externalAccess.service.nodePorts.https) }} - nodePort: {{ index 
$.Values.externalAccess.service.nodePorts.https $i }} - {{- else }} - nodePort: null - {{- end }} - {{- end }} - {{- if $.Values.metrics.enabled }} - - name: http-metrics - port: {{ $.Values.externalAccess.service.ports.metrics }} - targetPort: http-metrics - {{- if not (empty $.Values.externalAccess.service.nodePorts.metrics) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.metrics $i }} - {{- else }} - nodePort: null - {{- end }} - {{- end }} - - name: tcp - port: {{ $.Values.externalAccess.service.ports.tcp }} - targetPort: tcp - {{- if not (empty $.Values.externalAccess.service.nodePorts.tcp) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.tcp $i }} - {{- else }} - nodePort: null - {{- end }} - {{- if $.Values.tls.enabled }} - - name: tcp-secure - port: {{ $.Values.externalAccess.service.ports.tcpSecure }} - targetPort: tcp-secure - {{- if not (empty $.Values.externalAccess.service.nodePorts.tcpSecure) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.tcpSecure $i }} - {{- else }} - nodePort: null - {{- end }} - {{- end }} - {{- if $.Values.keeper.enabled }} - - name: tcp-keeper - port: {{ $.Values.externalAccess.service.ports.keeper }} - targetPort: tcp-keeper - {{- if not (empty $.Values.externalAccess.service.nodePorts.keeper) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.keeper $i }} - {{- else }} - nodePort: null - {{- end }} - - name: tcp-keeperinter - port: {{ $.Values.externalAccess.service.ports.keeperInter }} - targetPort: tcp-keeperinter - {{- if not (empty $.Values.externalAccess.service.nodePorts.keeperInter) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.keeperInter $i }} - {{- else }} - nodePort: null - {{- end }} - {{- if $.Values.tls.enabled }} - - name: tcp-keepertls - port: {{ $.Values.externalAccess.service.ports.keeperSecure }} - targetPort: tcp-keepertls - {{- if not (empty $.Values.externalAccess.service.nodePorts.keeperSecure) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.keeperSecure $i }} - {{- else }} - nodePort: null - {{- end }} - {{- end }} - {{- end }} - - name: tcp-mysql - port: {{ $.Values.externalAccess.service.ports.mysql }} - targetPort: tcp-mysql - {{- if not (empty $.Values.externalAccess.service.nodePorts.mysql) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.mysql $i }} - {{- else }} - nodePort: null - {{- end }} - - name: tcp-postgresql - port: {{ $.Values.externalAccess.service.ports.postgresql }} - targetPort: tcp-postgresql - {{- if not (empty $.Values.externalAccess.service.nodePorts.postgresql) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.postgresql $i }} - {{- else }} - nodePort: null - {{- end }} - - name: tcp-intersrv - port: {{ $.Values.externalAccess.service.ports.interserver }} - targetPort: tcp-intersrv - {{- if not (empty $.Values.externalAccess.service.nodePorts.interserver) }} - nodePort: {{ index $.Values.externalAccess.service.nodePorts.interserver $i }} - {{- else }} - nodePort: null - {{- end }} - {{- if $.Values.externalAccess.service.extraPorts }} - {{- include "common.tplvalues.render" (dict "value" $.Values.externalAccess.service.extraPorts "context" $) | nindent 4 }} - {{- end }} - {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.podLabels $.Values.commonLabels ) "context" $ ) }} - selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - 
statefulset.kubernetes.io/pod-name: {{ $targetPod }} ---- -{{- end }} -{{- end }} -{{- end }} diff --git a/charts/clickhouse/templates/service-headless.yaml b/charts/clickhouse/templates/service-headless.yaml deleted file mode 100644 index f989841b..00000000 --- a/charts/clickhouse/templates/service-headless.yaml +++ /dev/null @@ -1,69 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -apiVersion: v1 -kind: Service -metadata: - name: {{ include "clickhouse.headlessServiceName" . }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - {{- if or .Values.service.headless.annotations .Values.commonAnnotations }} - {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.headless.annotations .Values.commonAnnotations ) "context" . ) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} - {{- end }} -spec: - type: ClusterIP - clusterIP: None - publishNotReadyAddresses: true - ports: - - name: http - targetPort: http - port: {{ .Values.service.ports.http }} - protocol: TCP - - name: tcp - targetPort: tcp - port: {{ .Values.service.ports.tcp }} - protocol: TCP - {{- if .Values.tls.enabled }} - - name: tcp-secure - targetPort: tcp-secure - port: {{ .Values.service.ports.tcpSecure }} - protocol: TCP - {{- end }} - {{- if .Values.keeper.enabled }} - - name: tcp-keeper - targetPort: tcp-keeper - port: {{ .Values.service.ports.keeper }} - protocol: TCP - - name: tcp-keeperinter - targetPort: tcp-keeperinter - port: {{ .Values.service.ports.keeperInter }} - protocol: TCP - {{- if .Values.tls.enabled }} - - name: tcp-keepertls - targetPort: tcp-keepertls - port: {{ .Values.service.ports.keeperSecure }} - protocol: TCP - {{- end }} - {{- end }} - - name: tcp-mysql - targetPort: tcp-mysql - port: {{ .Values.service.ports.mysql }} - protocol: TCP - - name: tcp-postgresql - targetPort: tcp-postgresql - port: {{ .Values.service.ports.postgresql }} - protocol: TCP - - name: http-intersrv - targetPort: http-intersrv - port: {{ .Values.service.ports.interserver }} - protocol: TCP - {{- if .Values.service.extraPorts }} - {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} - {{- end }} - {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} - selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse diff --git a/charts/clickhouse/templates/service.yaml b/charts/clickhouse/templates/service.yaml deleted file mode 100644 index f54e2268..00000000 --- a/charts/clickhouse/templates/service.yaml +++ /dev/null @@ -1,152 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -apiVersion: v1 -kind: Service -metadata: - name: {{ template "common.names.fullname" . }} - namespace: {{ include "common.names.namespace" . 
| quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - {{- if or .Values.service.annotations .Values.commonAnnotations }} - {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.service.annotations .Values.commonAnnotations ) "context" . ) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }} - {{- end }} -spec: - type: {{ .Values.service.type }} - {{- if and .Values.service.clusterIP (eq .Values.service.type "ClusterIP") }} - clusterIP: {{ .Values.service.clusterIP }} - {{- end }} - {{- if .Values.service.sessionAffinity }} - sessionAffinity: {{ .Values.service.sessionAffinity }} - {{- end }} - {{- if .Values.service.sessionAffinityConfig }} - sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.service.sessionAffinityConfig "context" $) | nindent 4 }} - {{- end }} - {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort") }} - externalTrafficPolicy: {{ .Values.service.externalTrafficPolicy | quote }} - {{- end }} - {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerSourceRanges)) }} - loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} - {{- end }} - {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} - loadBalancerIP: {{ .Values.service.loadBalancerIP }} - {{- end }} - ports: - - name: http - targetPort: http - port: {{ .Values.service.ports.http }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.http)) }} - nodePort: {{ .Values.service.nodePorts.http }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.tls.enabled }} - - name: https - targetPort: https - port: {{ .Values.service.ports.https }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.https)) }} - nodePort: {{ .Values.service.nodePorts.https }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- end }} - - name: tcp - targetPort: tcp - port: {{ .Values.service.ports.tcp }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcp)) }} - nodePort: {{ .Values.service.nodePorts.tcp }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.tls.enabled }} - - name: tcp-secure - targetPort: tcp-secure - port: {{ .Values.service.ports.tcpSecure }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcpSecure)) }} - nodePort: {{ .Values.service.nodePorts.tcpSecure }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- end }} - {{- if .Values.keeper.enabled }} - - name: tcp-keeper - targetPort: tcp-keeper - port: {{ .Values.service.ports.keeper }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcp)) }} - nodePort: {{ .Values.service.nodePorts.keeper }} - {{- else if eq .Values.service.type 
"ClusterIP" }} - nodePort: null - {{- end }} - - name: tcp-keeperinter - targetPort: tcp-keeperinter - port: {{ .Values.service.ports.keeperInter }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcp)) }} - nodePort: {{ .Values.service.nodePorts.keeperInter }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.tls.enabled }} - - name: tcp-keepertls - targetPort: tcp-keepertls - port: {{ .Values.service.ports.keeperSecure }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.tcpSecure)) }} - nodePort: {{ .Values.service.nodePorts.keeperSecure }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- end }} - {{- end }} - - name: tcp-mysql - targetPort: tcp-mysql - port: {{ .Values.service.ports.mysql }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.mysql)) }} - nodePort: {{ .Values.service.nodePorts.mysql }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - - name: tcp-postgresql - targetPort: tcp-postgresql - port: {{ .Values.service.ports.postgresql }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.postgresql)) }} - nodePort: {{ .Values.service.nodePorts.postgresql }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - - name: http-intersrv - targetPort: http-intersrv - port: {{ .Values.service.ports.interserver }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.interserver)) }} - nodePort: {{ .Values.service.nodePorts.interserver }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- if .Values.metrics.enabled }} - - name: http-metrics - targetPort: http-metrics - port: {{ .Values.service.ports.metrics }} - protocol: TCP - {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.metrics)) }} - nodePort: {{ .Values.service.nodePorts.metrics }} - {{- else if eq .Values.service.type "ClusterIP" }} - nodePort: null - {{- end }} - {{- end }} - {{- if .Values.service.extraPorts }} - {{- include "common.tplvalues.render" (dict "value" .Values.service.extraPorts "context" $) | nindent 4 }} - {{- end }} - {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.podLabels .Values.commonLabels ) "context" . ) }} - selector: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse diff --git a/charts/clickhouse/templates/servicemonitor.yaml b/charts/clickhouse/templates/servicemonitor.yaml deleted file mode 100644 index 2148b375..00000000 --- a/charts/clickhouse/templates/servicemonitor.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "common.names.fullname" . 
}}
-  namespace: {{ default (include "common.names.namespace" .) .Values.metrics.serviceMonitor.namespace | quote }}
-  {{- $labels := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.labels .Values.commonLabels ) "context" . ) }}
-  labels: {{- include "common.labels.standard" ( dict "customLabels" $labels "context" $ ) | nindent 4 }}
-    app.kubernetes.io/component: clickhouse
-  {{- if or .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations }}
-  {{- $annotations := include "common.tplvalues.merge" ( dict "values" ( list .Values.metrics.serviceMonitor.annotations .Values.commonAnnotations ) "context" . ) }}
-  annotations: {{- include "common.tplvalues.render" ( dict "value" $annotations "context" $) | nindent 4 }}
-  {{- end }}
-spec:
-  jobLabel: {{ .Values.metrics.serviceMonitor.jobLabel | quote }}
-  selector:
-    matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 6 }}
-    {{- if .Values.metrics.serviceMonitor.selector }}
-    {{- include "common.tplvalues.render" (dict "value" .Values.metrics.serviceMonitor.selector "context" $) | nindent 6 }}
-    {{- end }}
-  endpoints:
-    - port: http-metrics
-      path: "/metrics"
-      {{- if .Values.metrics.serviceMonitor.interval }}
-      interval: {{ .Values.metrics.serviceMonitor.interval }}
-      {{- end }}
-      {{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
-      scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
-      {{- end }}
-      {{- if .Values.metrics.serviceMonitor.honorLabels }}
-      honorLabels: {{ .Values.metrics.serviceMonitor.honorLabels }}
-      {{- end }}
-      {{- if .Values.metrics.serviceMonitor.metricRelabelings }}
-      metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 8 }}
-      {{- end }}
-      {{- if .Values.metrics.serviceMonitor.relabelings }}
-      relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 8 }}
-      {{- end }}
-  namespaceSelector:
-    matchNames:
-      - {{ include "common.names.namespace" . | quote }}
-{{- end }}
diff --git a/charts/clickhouse/templates/start-scripts-secret.yaml b/charts/clickhouse/templates/start-scripts-secret.yaml
deleted file mode 100644
index c579f2e4..00000000
--- a/charts/clickhouse/templates/start-scripts-secret.yaml
+++ /dev/null
@@ -1,19 +0,0 @@
-{{- /*
-Copyright VMware, Inc.
-SPDX-License-Identifier: APACHE-2.0
-*/}}
-
-{{- if and .Values.startdbScripts (not .Values.startdbScriptsSecret) }}
-apiVersion: v1
-kind: Secret
-metadata:
-  name: {{ printf "%s-start-scripts" (include "common.names.fullname" .) }}
-  namespace: {{ include "common.names.namespace" . | quote }}
-  labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }}
-    app.kubernetes.io/component: clickhouse
-  {{- if .Values.commonAnnotations }}
-  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
-  {{- end }}
-stringData:
-{{- include "common.tplvalues.render" (dict "value" .Values.startdbScripts "context" .) | nindent 2 }}
-{{- end }}
diff --git a/charts/clickhouse/templates/statefulset.yaml b/charts/clickhouse/templates/statefulset.yaml
deleted file mode 100644
index 13d526ca..00000000
--- a/charts/clickhouse/templates/statefulset.yaml
+++ /dev/null
@@ -1,425 +0,0 @@
-{{- /*
-Copyright VMware, Inc.
-SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- $shards := .Values.shards | int }} -{{- range $i, $e := until $shards }} -apiVersion: {{ include "common.capabilities.statefulset.apiVersion" $ }} -kind: StatefulSet -metadata: - name: {{ printf "%s-shard%d" (include "common.names.fullname" $ ) $i }} - namespace: {{ include "common.names.namespace" $ | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" $.Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: clickhouse - {{- if $.Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -spec: - replicas: {{ $.Values.replicaCount }} - podManagementPolicy: {{ $.Values.podManagementPolicy | quote }} - {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.podLabels $.Values.commonLabels ) "context" $ ) }} - selector: - matchLabels: {{- include "common.labels.matchLabels" ( dict "customLabels" $podLabels "context" $ ) | nindent 6 }} - app.kubernetes.io/component: clickhouse - serviceName: {{ printf "%s-headless" (include "common.names.fullname" $) }} - {{- if $.Values.updateStrategy }} - updateStrategy: {{- toYaml $.Values.updateStrategy | nindent 4 }} - {{- end }} - template: - metadata: - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") $ | sha256sum }} - checksum/config-extra: {{ include (print $.Template.BasePath "/configmap-extra.yaml") $ | sha256sum }} - checksum/config-users-extra: {{ include (print $.Template.BasePath "/configmap-users-extra.yaml") $ | sha256sum }} - {{- if $.Values.podAnnotations }} - {{- include "common.tplvalues.render" (dict "value" $.Values.podAnnotations "context" $) | nindent 8 }} - {{- end }} - {{- if and $.Values.metrics.enabled $.Values.metrics.podAnnotations }} - {{- include "common.tplvalues.render" (dict "value" $.Values.metrics.podAnnotations "context" $) | nindent 8 }} - {{- end }} - labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} - app.kubernetes.io/component: clickhouse - shard: {{ $i | quote }} - spec: - serviceAccountName: {{ template "clickhouse.serviceAccountName" $ }} - {{- include "clickhouse.imagePullSecrets" $ | nindent 6 }} - {{- if $.Values.hostAliases }} - hostAliases: {{- include "common.tplvalues.render" (dict "value" $.Values.hostAliases "context" $) | nindent 8 }} - {{- end }} - {{- if $.Values.affinity }} - affinity: {{- include "common.tplvalues.render" ( dict "value" $.Values.affinity "context" $) | nindent 8 }} - {{- else }} - affinity: - podAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.podAffinityPreset "component" "clickhouse" "customLabels" $podLabels "context" $) | nindent 10 }} - podAntiAffinity: {{- include "common.affinities.pods" (dict "type" $.Values.podAntiAffinityPreset "component" "clickhouse" "customLabels" $podLabels "extraPodAffinityTerms" (ternary (list (dict "extraMatchLabels" (dict "shard" $i) "topologyKey" "topology.kubernetes.io/zone")) (list) $.Values.distributeReplicasByZone) "context" $) | nindent 10 }} - nodeAffinity: {{- include "common.affinities.nodes" (dict "type" $.Values.nodeAffinityPreset.type "key" $.Values.nodeAffinityPreset.key "values" $.Values.nodeAffinityPreset.values) | nindent 10 }} - {{- end }} - {{- if $.Values.nodeSelector }} - nodeSelector: {{- include "common.tplvalues.render" ( dict "value" $.Values.nodeSelector "context" $) | nindent 8 
}} - {{- end }} - {{- if $.Values.tolerations }} - tolerations: {{- include "common.tplvalues.render" (dict "value" $.Values.tolerations "context" $) | nindent 8 }} - {{- end }} - {{- if $.Values.priorityClassName }} - priorityClassName: {{ $.Values.priorityClassName | quote }} - {{- end }} - {{- if $.Values.schedulerName }} - schedulerName: {{ $.Values.schedulerName | quote }} - {{- end }} - {{- if $.Values.topologySpreadConstraints }} - topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" $.Values.topologySpreadConstraints "context" $) | nindent 8 }} - {{- end }} - {{- if $.Values.podSecurityContext.enabled }} - securityContext: {{- omit $.Values.podSecurityContext "enabled" | toYaml | nindent 8 }} - {{- end }} - {{- if $.Values.terminationGracePeriodSeconds }} - terminationGracePeriodSeconds: {{ $.Values.terminationGracePeriodSeconds }} - {{- end }} - initContainers: - {{- if and $.Values.tls.enabled (not $.Values.volumePermissions.enabled) }} - - name: copy-certs - image: {{ include "clickhouse.volumePermissions.image" $ }} - imagePullPolicy: {{ $.Values.volumePermissions.image.pullPolicy | quote }} - {{- if $.Values.resources }} - resources: {{- toYaml $.Values.resources | nindent 12 }} - {{- end }} - {{- if $.Values.containerSecurityContext.enabled }} - # We don't require a privileged container in this case - securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - command: - - /bin/sh - - -ec - - | - cp -L /tmp/certs/* /opt/bitnami/clickhouse/certs/ - chmod 600 {{ include "clickhouse.tlsCertKey" $ }} - volumeMounts: - - name: raw-certificates - mountPath: /tmp/certs - - name: clickhouse-certificates - mountPath: /opt/bitnami/clickhouse/certs - {{- else if and $.Values.volumePermissions.enabled $.Values.persistence.enabled }} - - name: volume-permissions - image: {{ include "clickhouse.volumePermissions.image" $ }} - imagePullPolicy: {{ $.Values.volumePermissions.image.pullPolicy | quote }} - command: - - /bin/sh - - -ec - - | - mkdir -p /bitnami/clickhouse/data - chmod 700 /bitnami/clickhouse/data - {{- if $.Values.keeper.enabled }} - mkdir -p /bitnami/clickhouse/keeper - chmod 700 /bitnami/clickhouse/keeper - {{- end }} - chown {{ $.Values.containerSecurityContext.runAsUser }}:{{ $.Values.podSecurityContext.fsGroup }} /bitnami/clickhouse - find /bitnami/clickhouse -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | \ - xargs -r chown -R {{ $.Values.containerSecurityContext.runAsUser }}:{{ $.Values.podSecurityContext.fsGroup }} - {{- if $.Values.tls.enabled }} - cp /tmp/certs/* /opt/bitnami/clickhouse/certs/ - {{- if eq ( toString ( $.Values.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} - chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/clickhouse/certs/ - {{- else }} - chown -R {{ $.Values.containerSecurityContext.runAsUser }}:{{ $.Values.podSecurityContext.fsGroup }} /opt/bitnami/clickhouse/certs/ - {{- end }} - chmod 600 {{ include "clickhouse.tlsCertKey" $ }} - {{- end }} - securityContext: {{- include "common.tplvalues.render" (dict "value" $.Values.volumePermissions.containerSecurityContext "context" $) | nindent 12 }} - {{- if $.Values.volumePermissions.resources }} - resources: {{- toYaml $.Values.volumePermissions.resources | nindent 12 }} - {{- end }} - volumeMounts: - - name: data - mountPath: /bitnami/clickhouse - {{- if $.Values.tls.enabled }} - - name: raw-certificates - mountPath: /tmp/certs - - name: clickhouse-certificates - mountPath: 
/opt/bitnami/clickhouse/certs - {{- end }} - {{- end }} - {{- if $.Values.initContainers }} - {{- include "common.tplvalues.render" (dict "value" $.Values.initContainers "context" $) | nindent 8 }} - {{- end }} - containers: - - name: clickhouse - image: {{ template "clickhouse.image" $ }} - imagePullPolicy: {{ $.Values.image.pullPolicy }} - {{- if $.Values.containerSecurityContext.enabled }} - securityContext: {{- omit $.Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} - {{- end }} - {{- if $.Values.diagnosticMode.enabled }} - command: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.command "context" $) | nindent 12 }} - {{- else if $.Values.command }} - command: {{- include "common.tplvalues.render" (dict "value" $.Values.command "context" $) | nindent 12 }} - {{- end }} - {{- if $.Values.diagnosticMode.enabled }} - args: {{- include "common.tplvalues.render" (dict "value" $.Values.diagnosticMode.args "context" $) | nindent 12 }} - {{- else if $.Values.args }} - args: {{- include "common.tplvalues.render" (dict "value" $.Values.args "context" $) | nindent 12 }} - {{- end }} - env: - - name: BITNAMI_DEBUG - value: {{ ternary "true" "false" (or $.Values.image.debug $.Values.diagnosticMode.enabled) | quote }} - - name: CLICKHOUSE_HTTP_PORT - value: {{ $.Values.containerPorts.http | quote }} - - name: CLICKHOUSE_TCP_PORT - value: {{ $.Values.containerPorts.tcp | quote }} - - name: CLICKHOUSE_MYSQL_PORT - value: {{ $.Values.containerPorts.mysql | quote }} - - name: CLICKHOUSE_POSTGRESQL_PORT - value: {{ $.Values.containerPorts.postgresql | quote }} - - name: CLICKHOUSE_INTERSERVER_HTTP_PORT - value: {{ $.Values.containerPorts.interserver | quote }} - {{- if $.Values.tls.enabled }} - - name: CLICKHOUSE_TCP_SECURE_PORT - value: {{ $.Values.containerPorts.tcpSecure | quote }} - - name: CLICKHOUSE_HTTPS_PORT - value: {{ $.Values.containerPorts.https | quote }} - {{- end }} - {{- if $.Values.keeper.enabled }} - - name: CLICKHOUSE_KEEPER_PORT - value: {{ $.Values.containerPorts.keeper | quote }} - - name: CLICKHOUSE_KEEPER_INTER_PORT - value: {{ $.Values.containerPorts.keeperInter | quote }} - {{- if $.Values.tls.enabled }} - - name: CLICKHOUSE_KEEPER_SECURE_PORT - value: {{ $.Values.containerPorts.keeperSecure | quote }} - {{- end }} - {{- end }} - {{- if $.Values.metrics.enabled }} - - name: CLICKHOUSE_METRICS_PORT - value: {{ $.Values.containerPorts.metrics | quote }} - {{- end }} - - name: CLICKHOUSE_SHARD_ID - value: {{ printf "shard%d" $i | quote }} - - name: CLICKHOUSE_REPLICA_ID - valueFrom: - fieldRef: - fieldPath: metadata.name - - name: ALLOW_EMPTY_PASSWORD - value: "yes" - {{- if $.Values.tls.enabled }} - - name: CLICKHOUSE_TLS_CERT_FILE - value: {{ include "clickhouse.tlsCert" $ | quote}} - - name: CLICKHOUSE_TLS_KEY_FILE - value: {{ include "clickhouse.tlsCertKey" $ | quote }} - - name: CLICKHOUSE_TLS_CA_FILE - value: {{ include "clickhouse.tlsCACert" $ | quote }} - {{- end }} - {{- if $.Values.extraEnvVars }} - {{- include "common.tplvalues.render" (dict "value" $.Values.extraEnvVars "context" $) | nindent 12 }} - {{- end }} - {{- if $.Values.keeper.enabled }} - {{- $replicas := $.Values.replicaCount | int }} - {{- range $j, $r := until $replicas }} - - name: {{ printf "KEEPER_NODE_%d" $j }} - value: {{ printf "%s-shard%d-%d.%s.%s.svc.%s" (include "common.names.fullname" $ ) $i $j (include "clickhouse.headlessServiceName" $) (include "common.names.namespace" $) $.Values.clusterDomain }} - {{- end }} - {{- else if 
$.Values.zookeeper.enabled }} - {{- $replicas := $.Values.zookeeper.replicaCount | int }} - {{- range $j, $r := until $replicas }} - - name: {{ printf "KEEPER_NODE_%d" $j }} - value: {{ printf "%s-%d.%s.%s.svc.%s" (include "clickhouse.zookeeper.fullname" $ ) $j (include "clickhouse.zookeeper.headlessServiceName" $) (include "common.names.namespace" $) $.Values.clusterDomain }} - {{- end }} - {{- end }} - envFrom: - {{- if $.Values.extraEnvVarsCM }} - - configMapRef: - name: {{ include "common.tplvalues.render" (dict "value" $.Values.extraEnvVarsCM "context" $) }} - {{- end }} - {{- if $.Values.extraEnvVarsSecret }} - - secretRef: - name: {{ include "common.tplvalues.render" (dict "value" $.Values.extraEnvVarsSecret "context" $) }} - {{- end }} - {{- if $.Values.resources }} - resources: {{- toYaml $.Values.resources | nindent 12 }} - {{- end }} - ports: - - name: http - containerPort: {{ $.Values.containerPorts.http }} - - name: tcp - containerPort: {{ $.Values.containerPorts.tcp }} - {{- if $.Values.tls.enabled }} - - name: https - containerPort: {{ $.Values.containerPorts.https }} - - name: tcp-secure - containerPort: {{ $.Values.containerPorts.tcpSecure }} - {{- end }} - {{- if $.Values.keeper.enabled }} - - name: tcp-keeper - containerPort: {{ $.Values.containerPorts.keeper }} - - name: tcp-keeperinter - containerPort: {{ $.Values.containerPorts.keeperInter }} - {{- if $.Values.tls.enabled }} - - name: tcp-keepertls - containerPort: {{ $.Values.containerPorts.keeperSecure }} - {{- end }} - {{- end }} - - name: tcp-postgresql - containerPort: {{ $.Values.containerPorts.postgresql }} - - name: tcp-mysql - containerPort: {{ $.Values.containerPorts.mysql }} - - name: http-intersrv - containerPort: {{ $.Values.containerPorts.interserver }} - {{- if $.Values.metrics.enabled }} - - name: http-metrics - containerPort: {{ $.Values.containerPorts.metrics }} - {{- end }} - {{- if not $.Values.diagnosticMode.enabled }} - {{- if $.Values.customLivenessProbe }} - livenessProbe: {{- include "common.tplvalues.render" (dict "value" $.Values.customLivenessProbe "context" $) | nindent 12 }} - {{- else if $.Values.livenessProbe.enabled }} - livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit $.Values.livenessProbe "enabled") "context" $) | nindent 12 }} - httpGet: - path: /ping - port: http - {{- end }} - {{- if $.Values.customReadinessProbe }} - readinessProbe: {{- include "common.tplvalues.render" (dict "value" $.Values.customReadinessProbe "context" $) | nindent 12 }} - {{- else if $.Values.readinessProbe.enabled }} - readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit $.Values.readinessProbe "enabled") "context" $) | nindent 12 }} - httpGet: - path: /ping - port: http - {{- end }} - {{- if $.Values.customStartupProbe }} - startupProbe: {{- include "common.tplvalues.render" (dict "value" $.Values.customStartupProbe "context" $) | nindent 12 }} - {{- else if $.Values.startupProbe.enabled }} - startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit $.Values.startupProbe "enabled") "context" $) | nindent 12 }} - httpGet: - path: /ping - port: http - {{- end }} - {{- end }} - {{- if $.Values.lifecycleHooks }} - lifecycle: {{- include "common.tplvalues.render" (dict "value" $.Values.lifecycleHooks "context" $) | nindent 12 }} - {{- end }} - volumeMounts: - - name: scripts - mountPath: /scripts/setup.sh - subPath: setup.sh - - name: data - mountPath: /bitnami/clickhouse - - name: config - mountPath: /bitnami/clickhouse/etc/conf.d/default - {{- if or 
$.Values.extraOverridesConfigmap $.Values.extraOverrides }} - - name: extra-config - mountPath: /bitnami/clickhouse/etc/conf.d/extra-configmap - {{- end }} - {{- if or $.Values.usersExtraOverridesConfigmap $.Values.usersExtraOverrides }} - - name: users-extra-config - mountPath: /bitnami/clickhouse/etc/users.d/users-extra-configmap - {{- end }} - {{- if $.Values.extraOverridesSecret }} - - name: extra-secret - mountPath: /bitnami/clickhouse/etc/conf.d/extra-secret - {{- end }} - {{- if $.Values.usersExtraOverridesSecret }} - - name: users-extra-secret - mountPath: /bitnami/clickhouse/etc/users.d/users-extra-secret - {{- end }} - {{- if $.Values.tls.enabled }} - - name: clickhouse-certificates - mountPath: /bitnami/clickhouse/certs - {{- end }} - {{- if or $.Values.initdbScriptsSecret $.Values.initdbScripts }} - - name: custom-init-scripts - mountPath: /docker-entrypoint-initdb.d - {{- end }} - {{- if or $.Values.startdbScriptsSecret $.Values.startdbScripts }} - - name: custom-start-scripts - mountPath: /docker-entrypoint-startdb.d - {{- end }} - {{- if $.Values.extraVolumeMounts }} - {{- include "common.tplvalues.render" (dict "value" $.Values.extraVolumeMounts "context" $) | nindent 12 }} - {{- end }} - {{- if $.Values.sidecars }} - {{- include "common.tplvalues.render" ( dict "value" $.Values.sidecars "context" $) | nindent 8 }} - {{- end }} - volumes: - - name: scripts - configMap: - name: {{ printf "%s-scripts" (include "common.names.fullname" $) }} - defaultMode: 0755 - - name: config - configMap: - name: {{ template "clickhouse.configmapName" $ }} - {{- if or $.Values.initdbScriptsSecret $.Values.initdbScripts }} - - name: custom-init-scripts - secret: - secretName: {{ include "clickhouse.initdbScriptsSecret" $ }} - {{- end }} - {{- if or $.Values.startdbScriptsSecret $.Values.startdbScripts }} - - name: custom-start-scripts - secret: - secretName: {{ include "clickhouse.startdbScriptsSecret" $ }} - {{- end }} - {{- if or $.Values.extraOverridesConfigmap $.Values.extraOverrides }} - - name: extra-config - configMap: - name: {{ template "clickhouse.extraConfigmapName" $ }} - {{- end }} - {{- if or $.Values.usersExtraOverridesConfigmap $.Values.usersExtraOverrides }} - - name: users-extra-config - configMap: - name: {{ template "clickhouse.usersExtraConfigmapName" $ }} - {{- end }} - {{- if $.Values.extraOverridesSecret }} - - name: extra-secret - secret: - secretName: {{ $.Values.extraOverridesSecret }} - {{- end }} - {{- if $.Values.usersExtraOverridesSecret }} - - name: users-extra-secret - secret: - secretName: {{ $.Values.usersExtraOverridesSecret }} - {{- end }} - {{- if not $.Values.persistence.enabled }} - - name: data - emptyDir: {} - {{- else if $.Values.persistence.existingClaim }} - - name: data - persistentVolumeClaim: - claimName: {{ tpl $.Values.persistence.existingClaim $ }} - {{- end }} - {{- if $.Values.tls.enabled }} - - name: raw-certificates - secret: - secretName: {{ include "clickhouse.tlsSecretName" $ }} - - name: clickhouse-certificates - emptyDir: {} - {{- end }} - {{- if $.Values.extraVolumes }} - {{- include "common.tplvalues.render" (dict "value" $.Values.extraVolumes "context" $) | nindent 8 }} - {{- end }} - {{- if and $.Values.persistence.enabled (not $.Values.persistence.existingClaim) }} - volumeClaimTemplates: - - metadata: - name: data - {{- if or $.Values.persistence.annotations $.Values.commonAnnotations }} - {{- $claimAnnotations := include "common.tplvalues.merge" ( dict "values" ( list $.Values.persistence.annotations $.Values.commonLabels ) 
"context" $ ) }} - annotations: {{- include "common.tplvalues.render" ( dict "value" $claimAnnotations "context" $ ) | nindent 10 }} - {{- end }} - {{- $claimLabels := include "common.tplvalues.merge" ( dict "values" ( list $.Values.persistence.labels $.Values.commonLabels ) "context" $ ) }} - labels: {{- include "common.labels.matchLabels" ( dict "customLabels" $claimLabels "context" $ ) | nindent 10 }} - app.kubernetes.io/component: clickhouse - spec: - accessModes: - {{- range $.Values.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ $.Values.persistence.size | quote }} - {{- if $.Values.persistence.selector }} - selector: {{- include "common.tplvalues.render" (dict "value" $.Values.persistence.selector "context" $) | nindent 10 }} - {{- end }} - {{- if $.Values.persistence.dataSource }} - dataSource: {{- include "common.tplvalues.render" (dict "value" $.Values.persistence.dataSource "context" $) | nindent 10 }} - {{- end }} - {{- include "common.storage.class" (dict "persistence" $.Values.persistence "global" $.Values.global) | nindent 8 }} - {{- end }} ---- -{{- end }} diff --git a/charts/clickhouse/templates/tls-secret.yaml b/charts/clickhouse/templates/tls-secret.yaml deleted file mode 100644 index 04b188e1..00000000 --- a/charts/clickhouse/templates/tls-secret.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- /* -Copyright VMware, Inc. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if (include "clickhouse.createTlsSecret" . ) }} -{{- $secretName := printf "%s-crt" (include "common.names.fullname" .) }} -{{- $ca := genCA "clickhouse-ca" 365 }} -{{- $fullname := include "common.names.fullname" . }} -{{- $releaseNamespace := .Release.Namespace }} -{{- $clusterDomain := .Values.clusterDomain }} -{{- $primaryHeadlessServiceName := printf "%s-headless" (include "common.names.fullname" .)}} -{{- $altNames := list (printf "*.%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $fullname $releaseNamespace $clusterDomain) (printf "*.%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) (printf "%s.%s.svc.%s" $primaryHeadlessServiceName $releaseNamespace $clusterDomain) $fullname }} -{{- $cert := genSignedCert $fullname nil $altNames 365 $ca }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ $secretName }} - namespace: {{ .Release.Namespace | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -type: kubernetes.io/tls -data: - tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} - tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} - ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} -{{- end }} diff --git a/charts/clickhouse/values.yaml b/charts/clickhouse/values.yaml deleted file mode 100644 index 444f13fa..00000000 --- a/charts/clickhouse/values.yaml +++ /dev/null @@ -1,1131 +0,0 @@ -# Copyright VMware, Inc. 
-# SPDX-License-Identifier: APACHE-2.0 - -## @section Global parameters -## Global Docker image parameters -## Please, note that this will override the image parameters, including dependencies, configured to use the global value -## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass -## - -## @param global.imageRegistry Global Docker image registry -## @param global.imagePullSecrets Global Docker registry secret names as an array -## @param global.storageClass Global StorageClass for Persistent Volume(s) -## -global: - imageRegistry: "" - ## E.g. - ## imagePullSecrets: - ## - myRegistryKeySecretName - ## - imagePullSecrets: [] - storageClass: "" - -## @section Common parameters -## - -## @param kubeVersion Override Kubernetes version -## -kubeVersion: "" -## @param nameOverride String to partially override common.names.name -## -nameOverride: "" -## @param fullnameOverride String to fully override common.names.fullname -## -fullnameOverride: "" -## @param namespaceOverride String to fully override common.names.namespace -## -namespaceOverride: "" -## @param commonLabels Labels to add to all deployed objects -## -commonLabels: {} -## @param commonAnnotations Annotations to add to all deployed objects -## -commonAnnotations: {} -## @param clusterDomain Kubernetes cluster domain name -## -clusterDomain: cluster.local -## @param extraDeploy Array of extra objects to deploy with the release -## -extraDeploy: [] - -## Enable diagnostic mode in the deployment -## -diagnosticMode: - ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) - ## - enabled: false - ## @param diagnosticMode.command Command to override all containers in the deployment - ## - command: - - sleep - ## @param diagnosticMode.args Args to override all containers in the deployment - ## - args: - - infinity - -## @section ClickHouse Parameters -## - -## Bitnami ClickHouse image -## ref: https://hub.docker.com/r/bitnami/clickhouse/tags/ -## @param image.registry [default: REGISTRY_NAME] ClickHouse image registry -## @param image.repository [default: REPOSITORY_NAME/clickhouse] ClickHouse image repository -## @skip image.tag ClickHouse image tag (immutable tags are recommended) -## @param image.digest ClickHouse image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag -## @param image.pullPolicy ClickHouse image pull policy -## @param image.pullSecrets ClickHouse image pull secrets -## @param image.debug Enable ClickHouse image debug mode -## -image: - registry: docker.io - repository: bitnami/clickhouse - tag: 23.10.5-debian-11-r0 - digest: "" - ## Specify a imagePullPolicy - ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' - ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Enable debug mode - ## - debug: false -## @param shards Number of ClickHouse shards to deploy -## -shards: 2 - -## @param replicaCount Number of ClickHouse replicas per shard to deploy -## if keeper enable, same as keeper count, keeper cluster by shards. 
-## -replicaCount: 3 - -## @param distributeReplicasByZone Schedules replicas of the same shard to different availability zones -## -distributeReplicasByZone: false -## @param containerPorts.http ClickHouse HTTP container port -## @param containerPorts.https ClickHouse HTTPS container port -## @param containerPorts.tcp ClickHouse TCP container port -## @param containerPorts.tcpSecure ClickHouse TCP (secure) container port -## @param containerPorts.keeper ClickHouse keeper TCP container port -## @param containerPorts.keeperSecure ClickHouse keeper TCP (secure) container port -## @param containerPorts.keeperInter ClickHouse keeper interserver TCP container port -## @param containerPorts.mysql ClickHouse MySQL container port -## @param containerPorts.postgresql ClickHouse PostgreSQL container port -## @param containerPorts.interserver ClickHouse Interserver container port -## @param containerPorts.metrics ClickHouse metrics container port -## -containerPorts: - http: 8123 - https: 8443 - tcp: 9000 - tcpSecure: 9440 - keeper: 2181 - keeperSecure: 3181 - keeperInter: 9444 - mysql: 9004 - postgresql: 9005 - interserver: 9009 - metrics: 8001 -## Configure extra options for ClickHouse containers' liveness and readiness probes -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes -## @param livenessProbe.enabled Enable livenessProbe on ClickHouse containers -## @param livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe -## @param livenessProbe.periodSeconds Period seconds for livenessProbe -## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe -## @param livenessProbe.failureThreshold Failure threshold for livenessProbe -## @param livenessProbe.successThreshold Success threshold for livenessProbe -## -livenessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 -## @param readinessProbe.enabled Enable readinessProbe on ClickHouse containers -## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe -## @param readinessProbe.periodSeconds Period seconds for readinessProbe -## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe -## @param readinessProbe.failureThreshold Failure threshold for readinessProbe -## @param readinessProbe.successThreshold Success threshold for readinessProbe -## -readinessProbe: - enabled: true - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 -## @param startupProbe.enabled Enable startupProbe on ClickHouse containers -## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe -## @param startupProbe.periodSeconds Period seconds for startupProbe -## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe -## @param startupProbe.failureThreshold Failure threshold for startupProbe -## @param startupProbe.successThreshold Success threshold for startupProbe -## -startupProbe: - enabled: false - failureThreshold: 3 - initialDelaySeconds: 10 - periodSeconds: 10 - successThreshold: 1 - timeoutSeconds: 1 -## @param customLivenessProbe Custom livenessProbe that overrides the default one -## -customLivenessProbe: {} -## @param customReadinessProbe Custom readinessProbe that overrides the default one -## -customReadinessProbe: {} -## @param customStartupProbe Custom startupProbe that overrides the default one -## -customStartupProbe: {} -## ClickHouse resource 
requests and limits -## ref: http://kubernetes.io/docs/user-guide/compute-resources/ -## @param resources.limits The resources limits for the ClickHouse containers -## @param resources.requests The requested resources for the ClickHouse containers -## -resources: - limits: {} - requests: {} -## Configure Pods Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod -## @param podSecurityContext.enabled Enabled ClickHouse pods' Security Context -## @param podSecurityContext.fsGroup Set ClickHouse pod's Security Context fsGroup -## If you are using Kubernetes 1.18, the following code needs to be commented out. -## -podSecurityContext: - enabled: true - fsGroup: 1001 -## Configure Container Security Context -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container -## @param containerSecurityContext.enabled Enable containers' Security Context -## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser -## @param containerSecurityContext.runAsNonRoot Set containers' Security Context runAsNonRoot -## @param containerSecurityContext.readOnlyRootFilesystem Set read only root file system pod's -## @param containerSecurityContext.privileged Set contraller container's Security Context privileged -## @param containerSecurityContext.allowPrivilegeEscalation Set contraller container's Security Context allowPrivilegeEscalation -## @param containerSecurityContext.capabilities.drop List of capabilities to be droppedn -## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile -## -containerSecurityContext: - enabled: true - runAsUser: 1001 - runAsNonRoot: true - privileged: false - allowPrivilegeEscalation: false - readOnlyRootFilesystem: false - capabilities: - drop: ["ALL"] - seccompProfile: - type: "RuntimeDefault" - -## Authentication -## @param auth.username ClickHouse Admin username -## @param auth.password ClickHouse Admin password -## @param auth.existingSecret Name of a secret containing the Admin password -## @param auth.existingSecretKey Name of the key inside the existing secret -## -auth: - username: "" - password: "" - existingSecret: "" - existingSecretKey: "" - -## @param logLevel Logging level -## -logLevel: information - -## @section ClickHouse keeper configuration parameters -## @param keeper.enabled Deploy ClickHouse keeper. Support is experimental. -## -keeper: - enabled: false - -## @param defaultConfigurationOverrides [string] Default configuration overrides (evaluated as a template) -## -defaultConfigurationOverrides: | - - - - - - {{ include "common.names.fullname" . 
}} - - - - {{ .Values.logLevel }} - - {{- if or (ne (int .Values.shards) 1) (ne (int .Values.replicaCount) 1)}} - - - - {{- $shards := $.Values.shards | int }} - {{- range $shard, $e := until $shards }} - - {{- $replicas := $.Values.replicaCount | int }} - {{- range $i, $_e := until $replicas }} - - {{ printf "%s-shard%d-%d.%s.%s.svc.%s" (include "common.names.fullname" $ ) $shard $i (include "clickhouse.headlessServiceName" $) (include "common.names.namespace" $) $.Values.clusterDomain }} - {{ $.Values.service.ports.tcp }} - - - - {{- end }} - - {{- end }} - - - {{- end }} - {{- if .Values.keeper.enabled }} - - - {{/*ClickHouse keeper configuration using the helm chart */}} - {{ $.Values.containerPorts.keeper }} - {{- if .Values.tls.enabled }} - {{ $.Values.containerPorts.keeperSecure }} - {{- end }} - - /bitnami/clickhouse/keeper/coordination/log - /bitnami/clickhouse/keeper/coordination/snapshots - - - 10000 - 30000 - trace - - - - {{- $nodes := .Values.replicaCount | int }} - {{- range $node, $e := until $nodes }} - - {{ $node | int }} - - {{ $.Values.service.ports.keeperInter }} - - {{- end }} - - - {{- end }} - {{- if or .Values.keeper.enabled .Values.zookeeper.enabled .Values.externalZookeeper.servers }} - - - {{- if or .Values.keeper.enabled }} - {{- $nodes := .Values.replicaCount | int }} - {{- range $node, $e := until $nodes }} - - - {{ $.Values.service.ports.keeper }} - - {{- end }} - {{- else if .Values.zookeeper.enabled }} - {{/* Zookeeper configuration using the helm chart */}} - {{- $nodes := .Values.zookeeper.replicaCount | int }} - {{- range $node, $e := until $nodes }} - - - {{ $.Values.zookeeper.service.ports.client }} - - {{- end }} - {{- else if .Values.externalZookeeper.servers }} - {{/* Zookeeper configuration using an external instance */}} - {{- range $node :=.Values.externalZookeeper.servers }} - - {{ $node }} - {{ $.Values.externalZookeeper.port }} - - {{- end }} - {{- end }} - - {{- end }} - {{- if .Values.tls.enabled }} - - - - - - {{- $certFileName := default "tls.crt" .Values.tls.certFilename }} - {{- $keyFileName := default "tls.key" .Values.tls.certKeyFilename }} - /bitnami/clickhouse/certs/{{$certFileName}} - /bitnami/clickhouse/certs/{{$keyFileName}} - none - true - sslv2,sslv3 - true - {{- if or .Values.tls.autoGenerated .Values.tls.certCAFilename }} - {{- $caFileName := default "ca.crt" .Values.tls.certCAFilename }} - /bitnami/clickhouse/certs/{{$caFileName}} - {{- else }} - true - {{- end }} - - - true - true - sslv2,sslv3 - true - none - - AcceptCertificateHandler - - - - {{- end }} - {{- if .Values.metrics.enabled }} - - - /metrics - - true - true - true - - {{- end }} - - -## @param existingOverridesConfigmap The name of an existing ConfigMap with your custom configuration for ClickHouse -## -existingOverridesConfigmap: "" - -## @param extraOverrides Extra configuration overrides (evaluated as a template) apart from the default -## -extraOverrides: "" - -## @param extraOverridesConfigmap The name of an existing ConfigMap with extra configuration for ClickHouse -## -extraOverridesConfigmap: "" - -## @param extraOverridesSecret The name of an existing ConfigMap with your custom configuration for ClickHouse -## -extraOverridesSecret: "" - -## @param usersExtraOverrides Users extra configuration overrides (evaluated as a template) apart from the default -## -usersExtraOverrides: "" - -## @param usersExtraOverridesConfigmap The name of an existing ConfigMap with users extra configuration for ClickHouse -## -usersExtraOverridesConfigmap: "" - -## @param 
usersExtraOverridesSecret The name of an existing ConfigMap with your custom users configuration for ClickHouse -## -usersExtraOverridesSecret: "" - -## @param initdbScripts Dictionary of initdb scripts -## Specify dictionary of scripts to be run at first boot -## Example: -## initdbScripts: -## my_init_script.sh: | -## #!/bin/bash -## echo "Do something." -## -initdbScripts: {} -## @param initdbScriptsSecret ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`) -## -initdbScriptsSecret: "" - -## @param startdbScripts Dictionary of startdb scripts -## Specify dictionary of scripts to be run on every start -## Example: -## startdbScripts: -## my_start_script.sh: | -## #!/bin/bash -## echo "Do something." -## -startdbScripts: {} -## @param startdbScriptsSecret ConfigMap with the startdb scripts (Note: Overrides `startdbScripts`) -## -startdbScriptsSecret: "" - -## @param command Override default container command (useful when using custom images) -## -command: - - /scripts/setup.sh -## @param args Override default container args (useful when using custom images) -## -args: [] -## @param hostAliases ClickHouse pods host aliases -## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ -## -hostAliases: [] -## @param podLabels Extra labels for ClickHouse pods -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ -## -podLabels: {} -## @param podAnnotations Annotations for ClickHouse pods -## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ -## -podAnnotations: {} -## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` -## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## -podAffinityPreset: "" -## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` -## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity -## -podAntiAffinityPreset: soft -## Node affinity preset -## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity -## -nodeAffinityPreset: - ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - ## - type: "" - ## @param nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set - ## - key: "" - ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set - ## E.g. 
- ## values: - ## - e2e-az1 - ## - e2e-az2 - ## - values: [] -## @param affinity Affinity for ClickHouse pods assignment -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## NOTE: `podAffinityPreset`, `podAntiAffinityPreset`, and `nodeAffinityPreset` will be ignored when it's set -## -affinity: {} -## @param nodeSelector Node labels for ClickHouse pods assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -## -nodeSelector: {} -## @param tolerations Tolerations for ClickHouse pods assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] -## @param updateStrategy.type ClickHouse statefulset strategy type -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies -## -updateStrategy: - ## StrategyType - ## Can be set to RollingUpdate or OnDelete - ## - type: RollingUpdate - -## @param podManagementPolicy Statefulset Pod management policy, it needs to be Parallel to be able to complete the cluster join -## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies -## -podManagementPolicy: Parallel - -## @param priorityClassName ClickHouse pods' priorityClassName -## -priorityClassName: "" -## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template -## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods -## -topologySpreadConstraints: [] -## @param schedulerName Name of the k8s scheduler (other than default) for ClickHouse pods -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -schedulerName: "" -## @param terminationGracePeriodSeconds Seconds Redmine pod needs to terminate gracefully -## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods -## -terminationGracePeriodSeconds: "" -## @param lifecycleHooks for the ClickHouse container(s) to automate configuration before or after startup -## -lifecycleHooks: {} -## @param extraEnvVars Array with extra environment variables to add to ClickHouse nodes -## e.g: -## extraEnvVars: -## - name: FOO -## value: "bar" -## -extraEnvVars: [] -## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ClickHouse nodes -## -extraEnvVarsCM: "" -## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ClickHouse nodes -## -extraEnvVarsSecret: "" -## @param extraVolumes Optionally specify extra list of additional volumes for the ClickHouse pod(s) -## -extraVolumes: [] -## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ClickHouse container(s) -## -extraVolumeMounts: [] -## @param sidecars Add additional sidecar containers to the ClickHouse pod(s) -## e.g: -## sidecars: -## - name: your-image-name -## image: your-image -## imagePullPolicy: Always -## ports: -## - name: portname -## containerPort: 1234 -## -sidecars: [] -## @param initContainers Add additional init containers to the ClickHouse pod(s) -## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ -## e.g: -## initContainers: -## - name: your-image-name -## image: your-image -## imagePullPolicy: Always -## command: ['sh', '-c', 'echo "hello world"'] -## -initContainers: [] - -## TLS configuration -## -tls: - ## @param tls.enabled Enable TLS traffic support - 
## - enabled: false - ## @param tls.autoGenerated Generate automatically self-signed TLS certificates - ## - autoGenerated: false - ## @param tls.certificatesSecret Name of an existing secret that contains the certificates - ## - certificatesSecret: "" - ## @param tls.certFilename Certificate filename - ## - certFilename: "" - ## @param tls.certKeyFilename Certificate key filename - ## - certKeyFilename: "" - ## @param tls.certCAFilename CA Certificate filename - ## If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate - ## ref: https://www.postgresql.org/docs/9.6/auth-methods.html - ## - certCAFilename: "" - -## @section Traffic Exposure Parameters -## - -## ClickHouse service parameters -## -service: - ## @param service.type ClickHouse service type - ## - type: ClusterIP - ## @param service.ports.http ClickHouse service HTTP port - ## @param service.ports.https ClickHouse service HTTPS port - ## @param service.ports.tcp ClickHouse service TCP port - ## @param service.ports.tcpSecure ClickHouse service TCP (secure) port - ## @param service.ports.keeper ClickHouse keeper TCP container port - ## @param service.ports.keeperSecure ClickHouse keeper TCP (secure) container port - ## @param service.ports.keeperInter ClickHouse keeper interserver TCP container port - ## @param service.ports.mysql ClickHouse service MySQL port - ## @param service.ports.postgresql ClickHouse service PostgreSQL port - ## @param service.ports.interserver ClickHouse service Interserver port - ## @param service.ports.metrics ClickHouse service metrics port - ## - ports: - http: 8123 - https: 443 - tcp: 9000 - tcpSecure: 9440 - keeper: 2181 - keeperSecure: 3181 - keeperInter: 9444 - mysql: 9004 - postgresql: 9005 - interserver: 9009 - metrics: 8001 - ## Node ports to expose - ## @param service.nodePorts.http Node port for HTTP - ## @param service.nodePorts.https Node port for HTTPS - ## @param service.nodePorts.tcp Node port for TCP - ## @param service.nodePorts.tcpSecure Node port for TCP (with TLS) - ## @param service.nodePorts.keeper ClickHouse keeper TCP container port - ## @param service.nodePorts.keeperSecure ClickHouse keeper TCP (secure) container port - ## @param service.nodePorts.keeperInter ClickHouse keeper interserver TCP container port - ## @param service.nodePorts.mysql Node port for MySQL - ## @param service.nodePorts.postgresql Node port for PostgreSQL - ## @param service.nodePorts.interserver Node port for Interserver - ## @param service.nodePorts.metrics Node port for metrics - ## NOTE: choose port between <30000-32767> - ## - nodePorts: - http: "" - https: "" - tcp: "" - tcpSecure: "" - keeper: "" - keeperSecure: "" - keeperInter: "" - mysql: "" - postgresql: "" - interserver: "" - metrics: "" - ## @param service.clusterIP ClickHouse service Cluster IP - ## e.g.: - ## clusterIP: None - ## - clusterIP: "" - ## @param service.loadBalancerIP ClickHouse service Load Balancer IP - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer - ## - loadBalancerIP: "" - ## @param service.loadBalancerSourceRanges ClickHouse service Load Balancer sources - ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## e.g: - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - ## - loadBalancerSourceRanges: [] - ## @param service.externalTrafficPolicy ClickHouse service external traffic policy - ## ref 
-
-## @section Traffic Exposure Parameters
-##
-
-## ClickHouse service parameters
-##
-service:
-  ## @param service.type ClickHouse service type
-  ##
-  type: ClusterIP
-  ## @param service.ports.http ClickHouse service HTTP port
-  ## @param service.ports.https ClickHouse service HTTPS port
-  ## @param service.ports.tcp ClickHouse service TCP port
-  ## @param service.ports.tcpSecure ClickHouse service TCP (secure) port
-  ## @param service.ports.keeper ClickHouse keeper TCP container port
-  ## @param service.ports.keeperSecure ClickHouse keeper TCP (secure) container port
-  ## @param service.ports.keeperInter ClickHouse keeper interserver TCP container port
-  ## @param service.ports.mysql ClickHouse service MySQL port
-  ## @param service.ports.postgresql ClickHouse service PostgreSQL port
-  ## @param service.ports.interserver ClickHouse service Interserver port
-  ## @param service.ports.metrics ClickHouse service metrics port
-  ##
-  ports:
-    http: 8123
-    https: 443
-    tcp: 9000
-    tcpSecure: 9440
-    keeper: 2181
-    keeperSecure: 3181
-    keeperInter: 9444
-    mysql: 9004
-    postgresql: 9005
-    interserver: 9009
-    metrics: 8001
-  ## Node ports to expose
-  ## @param service.nodePorts.http Node port for HTTP
-  ## @param service.nodePorts.https Node port for HTTPS
-  ## @param service.nodePorts.tcp Node port for TCP
-  ## @param service.nodePorts.tcpSecure Node port for TCP (with TLS)
-  ## @param service.nodePorts.keeper ClickHouse keeper TCP container port
-  ## @param service.nodePorts.keeperSecure ClickHouse keeper TCP (secure) container port
-  ## @param service.nodePorts.keeperInter ClickHouse keeper interserver TCP container port
-  ## @param service.nodePorts.mysql Node port for MySQL
-  ## @param service.nodePorts.postgresql Node port for PostgreSQL
-  ## @param service.nodePorts.interserver Node port for Interserver
-  ## @param service.nodePorts.metrics Node port for metrics
-  ## NOTE: choose port between <30000-32767>
-  ##
-  nodePorts:
-    http: ""
-    https: ""
-    tcp: ""
-    tcpSecure: ""
-    keeper: ""
-    keeperSecure: ""
-    keeperInter: ""
-    mysql: ""
-    postgresql: ""
-    interserver: ""
-    metrics: ""
-  ## @param service.clusterIP ClickHouse service Cluster IP
-  ## e.g.:
-  ## clusterIP: None
-  ##
-  clusterIP: ""
-  ## @param service.loadBalancerIP ClickHouse service Load Balancer IP
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer
-  ##
-  loadBalancerIP: ""
-  ## @param service.loadBalancerSourceRanges ClickHouse service Load Balancer sources
-  ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-  ## e.g:
-  ## loadBalancerSourceRanges:
-  ##   - 10.10.10.0/24
-  ##
-  loadBalancerSourceRanges: []
-  ## @param service.externalTrafficPolicy ClickHouse service external traffic policy
-  ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip
-  ##
-  externalTrafficPolicy: Cluster
-  ## @param service.annotations Additional custom annotations for ClickHouse service
-  ##
-  annotations: {}
-  ## @param service.extraPorts Extra ports to expose in ClickHouse service (normally used with the `sidecars` value)
-  ##
-  extraPorts: []
-  ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin
-  ## Values: ClientIP or None
-  ## ref: https://kubernetes.io/docs/user-guide/services/
-  ##
-  sessionAffinity: None
-  ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity
-  ## sessionAffinityConfig:
-  ##   clientIP:
-  ##     timeoutSeconds: 300
-  ##
-  sessionAffinityConfig: {}
-  ## Headless service properties
-  ##
-  headless:
-    ## @param service.headless.annotations Annotations for the headless service.
-    ##
-    annotations: {}
-
-## External Access to ClickHouse configuration
-##
-externalAccess:
-  ## @param externalAccess.enabled Enable Kubernetes external cluster access to ClickHouse
-  ##
-  enabled: false
-  ## Parameters to configure K8s service(s) used to externally access ClickHouse
-  ## Note: A new service per will be created
-  ##
-  service:
-    ## @param externalAccess.service.type Kubernetes Service type for external access. It can be NodePort, LoadBalancer or ClusterIP
-    ##
-    type: LoadBalancer
-    ## @param externalAccess.service.ports.http ClickHouse service HTTP port
-    ## @param externalAccess.service.ports.https ClickHouse service HTTPS port
-    ## @param externalAccess.service.ports.tcp ClickHouse service TCP port
-    ## @param externalAccess.service.ports.tcpSecure ClickHouse service TCP (secure) port
-    ## @param externalAccess.service.ports.keeper ClickHouse keeper TCP container port
-    ## @param externalAccess.service.ports.keeperSecure ClickHouse keeper TCP (secure) container port
-    ## @param externalAccess.service.ports.keeperInter ClickHouse keeper interserver TCP container port
-    ## @param externalAccess.service.ports.mysql ClickHouse service MySQL port
-    ## @param externalAccess.service.ports.postgresql ClickHouse service PostgreSQL port
-    ## @param externalAccess.service.ports.interserver ClickHouse service Interserver port
-    ## @param externalAccess.service.ports.metrics ClickHouse service metrics port
-    ##
-    ports:
-      http: 80
-      https: 443
-      tcp: 9000
-      tcpSecure: 9440
-      keeper: 2181
-      keeperSecure: 3181
-      keeperInter: 9444
-      mysql: 9004
-      postgresql: 9005
-      interserver: 9009
-      metrics: 8001
-    ## @param externalAccess.service.loadBalancerIPs Array of load balancer IPs for each ClickHouse . Length must be the same as replicaCount
-    ## e.g:
-    ## loadBalancerIPs:
-    ##   - X.X.X.X
-    ##   - Y.Y.Y.Y
-    ##
-    loadBalancerIPs: []
-    ## @param externalAccess.service.loadBalancerAnnotations Array of load balancer annotations for each ClickHouse . Length must be the same as shards multiplied by replicaCount
-    ## e.g:
-    ## loadBalancerAnnotations:
-    ##   - external-dns.alpha.kubernetes.io/hostname: 1.external.example.com.
-    ##   - external-dns.alpha.kubernetes.io/hostname: 2.external.example.com.
-    ##
-    loadBalancerAnnotations: []
-    ## @param externalAccess.service.loadBalancerSourceRanges Address(es) that are allowed when service is LoadBalancer
-    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
-    ## e.g:
-    ## loadBalancerSourceRanges:
-    ##   - 10.10.10.0/24
-    ##
-    loadBalancerSourceRanges: []
-    ## @param externalAccess.service.nodePorts.http Node port for HTTP
-    ## @param externalAccess.service.nodePorts.https Node port for HTTPS
-    ## @param externalAccess.service.nodePorts.tcp Node port for TCP
-    ## @param externalAccess.service.nodePorts.tcpSecure Node port for TCP (with TLS)
-    ## @param externalAccess.service.nodePorts.keeper ClickHouse keeper TCP container port
-    ## @param externalAccess.service.nodePorts.keeperSecure ClickHouse keeper TCP container port (with TLS)
-    ## @param externalAccess.service.nodePorts.keeperInter ClickHouse keeper interserver TCP container port
-    ## @param externalAccess.service.nodePorts.mysql Node port for MySQL
-    ## @param externalAccess.service.nodePorts.postgresql Node port for PostgreSQL
-    ## @param externalAccess.service.nodePorts.interserver Node port for Interserver
-    ## @param externalAccess.service.nodePorts.metrics Node port for metrics
-    ## NOTE: choose port between <30000-32767>
-    ## e.g:
-    ## nodePorts:
-    ##   tls:
-    ##     - 30001
-    ##     - 30002
-    ##
-    nodePorts:
-      http: []
-      https: []
-      tcp: []
-      tcpSecure: []
-      keeper: []
-      keeperSecure: []
-      keeperInter: []
-      mysql: []
-      postgresql: []
-      interserver: []
-      metrics: []
-    ## @param externalAccess.service.labels Service labels for external access
-    ##
-    labels: {}
-    ## @param externalAccess.service.annotations Service annotations for external access
-    ##
-    annotations: {}
-    ## @param externalAccess.service.extraPorts Extra ports to expose in the ClickHouse external service
-    ##
-    extraPorts: []
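When the external-access block above was in use, each ClickHouse replica would have been published through its own LoadBalancer service; a sketch with placeholder addresses (none of these are chart defaults):

    externalAccess:
      enabled: true
      service:
        type: LoadBalancer
        loadBalancerIPs:             # one entry per replica
          - 203.0.113.10
          - 203.0.113.11
        loadBalancerSourceRanges:
          - 10.10.10.0/24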
-
-## ClickHouse ingress parameters
-## ref: http://kubernetes.io/docs/user-guide/ingress/
-##
-ingress:
-  ## @param ingress.enabled Enable ingress record generation for ClickHouse
-  ##
-  enabled: false
-  ## @param ingress.pathType Ingress path type
-  ##
-  pathType: ImplementationSpecific
-  ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set)
-  ##
-  apiVersion: ""
-  ## @param ingress.hostname Default host for the ingress record
-  ##
-  hostname: clickhouse.local
-  ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+)
-  ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster .
-  ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/
-  ##
-  ingressClassName: ""
-  ## @param ingress.path Default path for the ingress record
-  ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers
-  ##
-  path: /
-  ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations.
-  ## Use this parameter to set the required annotations for cert-manager, see
-  ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations
-  ## e.g:
-  ## annotations:
-  ##   kubernetes.io/ingress.class: nginx
-  ##   cert-manager.io/cluster-issuer: cluster-issuer-name
-  ##
-  annotations: {}
-  ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter
-  ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}`
-  ## You can:
-  ##   - Use the `ingress.secrets` parameter to create this TLS secret
-  ##   - Rely on cert-manager to create it by setting the corresponding annotations
-  ##   - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true`
-  ##
-  tls: false
-  ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm
-  ##
-  selfSigned: false
-  ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record
-  ## e.g:
-  ## extraHosts:
-  ##   - name: clickhouse.local
-  ##     path: /
-  ##
-  extraHosts: []
-  ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host
-  ## e.g:
-  ## extraPaths:
-  ##   - path: /*
-  ##     backend:
-  ##       serviceName: ssl-redirect
-  ##       servicePort: use-annotation
-  ##
-  extraPaths: []
-  ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls
-  ## e.g:
-  ## extraTls:
-  ## - hosts:
-  ##     - clickhouse.local
-  ##   secretName: clickhouse.local-tls
-  ##
-  extraTls: []
-  ## @param ingress.secrets Custom TLS certificates as secrets
-  ## NOTE: 'key' and 'certificate' are expected in PEM format
-  ## NOTE: 'name' should line up with a 'secretName' set further up
-  ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates
-  ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days
-  ## It is also possible to create and manage the certificates outside of this helm chart
-  ## Please see README.md for more information
-  ## e.g:
-  ## secrets:
-  ##   - name: clickhouse.local-tls
-  ##     key: |-
-  ##       -----BEGIN RSA PRIVATE KEY-----
-  ##       ...
-  ##       -----END RSA PRIVATE KEY-----
-  ##     certificate: |-
-  ##       -----BEGIN CERTIFICATE-----
-  ##       ...
-  ##       -----END CERTIFICATE-----
-  ##
-  secrets: []
-  ## @param ingress.extraRules Additional rules to be covered with this ingress record
-  ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules
-  ## e.g:
-  ## extraRules:
-  ## - host: example.local
-  ##     http:
-  ##       path: /
-  ##       backend:
-  ##         service:
-  ##           name: example-svc
-  ##           port:
-  ##             name: http
-  ##
-  extraRules: []
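Putting the ingress options above to work typically meant pairing a hostname with a cert-manager issuer; a hedged sketch in which the hostname and issuer are placeholders and an nginx ingress controller plus cert-manager are assumed to be installed:

    ingress:
      enabled: true
      ingressClassName: nginx
      hostname: clickhouse.example.com
      tls: true
      annotations:
        cert-manager.io/cluster-issuer: letsencrypt-prod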
-
-## @section Persistence Parameters
-##
-
-## Enable persistence using Persistent Volume Claims
-## ref: https://kubernetes.io/docs/user-guide/persistent-volumes/
-##
-persistence:
-  ## @param persistence.enabled Enable persistence using Persistent Volume Claims
-  ##
-  enabled: true
-  ## @param persistence.existingClaim Name of an existing PVC to use
-  ##
-  existingClaim: ""
-  ## @param persistence.storageClass Storage class of backing PVC
-  ## If defined, storageClassName: <storageClass>
-  ## If set to "-", storageClassName: "", which disables dynamic provisioning
-  ## If undefined (the default) or set to null, no storageClassName spec is
-  ##   set, choosing the default provisioner.  (gp2 on AWS, standard on
-  ##   GKE, AWS & OpenStack)
-  ##
-  storageClass: ""
-  ## @param persistence.labels Persistent Volume Claim labels
-  ##
-  labels: {}
-  ## @param persistence.annotations Persistent Volume Claim annotations
-  ##
-  annotations: {}
-  ## @param persistence.accessModes Persistent Volume Access Modes
-  ##
-  accessModes:
-    - ReadWriteOnce
-  ## @param persistence.size Size of data volume
-  ##
-  size: 15Gi
-  ## @param persistence.selector Selector to match an existing Persistent Volume for ClickHouse data PVC
-  ## If set, the PVC can't have a PV dynamically provisioned for it
-  ## E.g.
-  ## selector:
-  ##   matchLabels:
-  ##     app: my-app
-  ##
-  selector: {}
-  ## @param persistence.dataSource Custom PVC data source
-  ##
-  dataSource: {}
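On clusters where the default provisioner was not appropriate, the block above was the place to pin a storage class or reuse a pre-created claim; a sketch with placeholder names:

    persistence:
      enabled: true
      storageClass: managed-premium     # placeholder StorageClass name
      size: 50Gi
      ## or point at an existing PVC instead of provisioning a new one:
      # existingClaim: clickhouse-data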
-## @section Init Container Parameters
-##
-
-## 'volumePermissions' init container parameters
-## Changes the owner and group of the persistent volume mount point to runAsUser:fsGroup values
-## based on the *podSecurityContext/*containerSecurityContext parameters
-##
-volumePermissions:
-  ## @param volumePermissions.enabled Enable init container that changes the owner/group of the PV mount point to `runAsUser:fsGroup`
-  ##
-  enabled: false
-  ## OS Shell + Utility image
-  ## ref: https://hub.docker.com/r/bitnami/os-shell/tags/
-  ## @param volumePermissions.image.registry [default: REGISTRY_NAME] OS Shell + Utility image registry
-  ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] OS Shell + Utility image repository
-  ## @skip volumePermissions.image.tag OS Shell + Utility image tag (immutable tags are recommended)
-  ## @param volumePermissions.image.pullPolicy OS Shell + Utility image pull policy
-  ## @param volumePermissions.image.pullSecrets OS Shell + Utility image pull secrets
-  ##
-  image:
-    registry: docker.io
-    repository: bitnami/os-shell
-    tag: 11-debian-11-r91
-    pullPolicy: IfNotPresent
-    ## Optionally specify an array of imagePullSecrets.
-    ## Secrets must be manually created in the namespace.
-    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
-    ## e.g:
-    ## pullSecrets:
-    ##   - myRegistryKeySecretName
-    ##
-    pullSecrets: []
-  ## Init container's resource requests and limits
-  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
-  ## @param volumePermissions.resources.limits The resources limits for the init container
-  ## @param volumePermissions.resources.requests The requested resources for the init container
-  ##
-  resources:
-    limits: {}
-    requests: {}
-  ## Init container Container Security Context
-  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container
-  ## @param volumePermissions.containerSecurityContext.runAsUser Set init container's Security Context runAsUser
-  ## NOTE: when runAsUser is set to special value "auto", init container will try to chown the
-  ## data folder to auto-determined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
-  ## "auto" is especially useful for OpenShift which has scc with dynamic user ids (and 0 is not allowed)
-  ##
-  containerSecurityContext:
-    runAsUser: 0
-
-## @section Other Parameters
-##
-
-## ServiceAccount configuration
-##
-serviceAccount:
-  ## @param serviceAccount.create Specifies whether a ServiceAccount should be created
-  ##
-  create: true
-  ## @param serviceAccount.name The name of the ServiceAccount to use.
-  ## If not set and create is true, a name is generated using the common.names.fullname template
-  ##
-  name: ""
-  ## @param serviceAccount.annotations Additional Service Account annotations (evaluated as a template)
-  ##
-  annotations: {}
-  ## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account
-  ##
-  automountServiceAccountToken: true
-
-## Prometheus metrics
-##
-metrics:
-  ## @param metrics.enabled Enable the export of Prometheus metrics
-  ##
-  enabled: false
-  ## @param metrics.podAnnotations [object] Annotations for metrics scraping
-  ##
-  podAnnotations:
-    prometheus.io/scrape: "true"
-    prometheus.io/port: "{{ .Values.containerPorts.metrics }}"
-  ## Prometheus Operator ServiceMonitor configuration
-  ##
-  serviceMonitor:
-    ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`)
-    ##
-    enabled: false
-    ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running
-    ##
-    namespace: ""
-    ## @param metrics.serviceMonitor.annotations Additional custom annotations for the ServiceMonitor
-    ##
-    annotations: {}
-    ## @param metrics.serviceMonitor.labels Extra labels for the ServiceMonitor
-    ##
-    labels: {}
-    ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in Prometheus
-    ##
-    jobLabel: ""
-    ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels
-    ##
-    honorLabels: false
-    ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped.
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-    ## e.g:
-    ## interval: 10s
-    ##
-    interval: ""
-    ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended
-    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
-    ## e.g:
-    ## scrapeTimeout: 10s
-    ##
-    scrapeTimeout: ""
-    ## @param metrics.serviceMonitor.metricRelabelings Specify additional relabeling of metrics
-    ##
-    metricRelabelings: []
-    ## @param metrics.serviceMonitor.relabelings Specify general relabeling
-    ##
-    relabelings: []
-    ## @param metrics.serviceMonitor.selector Prometheus instance selector labels
-    ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration
-    ## selector:
-    ##   prometheus: my-prometheus
-    ##
-    selector: {}
-
-  ## Prometheus Operator PrometheusRule configuration
-  ##
-  prometheusRule:
-    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
-    ##
-    enabled: false
-    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
-    ##
-    namespace: ""
-    ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
-    ##
-    additionalLabels: {}
-    ## @param metrics.prometheusRule.rules PrometheusRule definitions
-    ##   - alert: ClickhouseServerRestart
-    ##     annotations:
-    ##       message: Clickhouse-server started recently
-    ##     expr: ClickHouseAsyncMetrics_Uptime > 1 < 180
-    ##     for: 5m
-    ##     labels:
-    ##       severity: warning
-    rules: []
-
-## @section External Zookeeper paramaters
-##
-externalZookeeper:
-  ## @param externalZookeeper.servers List of external zookeeper servers to use
-  ## @param externalZookeeper.port Port of the Zookeeper servers
-  ##
-  servers: []
-  port: 2888
-
-## @section Zookeeper subchart parameters
-##
-## @param zookeeper.enabled Deploy Zookeeper subchart
-## @param zookeeper.replicaCount Number of Zookeeper instances
-## @param zookeeper.service.ports.client Zookeeper client port
-##
-zookeeper:
-  enabled: false
-  ## Override zookeeper default image as 3.9 is not supported https://github.com/ClickHouse/ClickHouse/issues/53749
-  ## ref: https://github.com/bitnami/containers/tree/main/bitnami/zookeeper
-  ## @param zookeeper.image.registry [default: REGISTRY_NAME] Zookeeper image registry
-  ## @param zookeeper.image.repository [default: REPOSITORY_NAME/zookeeper] Zookeeper image repository
-  ## @skip zookeeper.image.tag Zookeeper image tag (immutable tags are recommended)
-  ## @param zookeeper.image.pullPolicy Zookeeper image pull policy
-  image:
-    registry: docker.io
-    repository: bitnami/zookeeper
-    tag: 3.8.3-debian-11-r2
-    pullPolicy: IfNotPresent
-  replicaCount: 3
-  service:
-    ports:
-      client: 2181
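Before this chart was dropped, monitoring and replication coordination were driven entirely by these values; switching them on would have looked roughly like the override below. The ServiceMonitor assumes the Prometheus Operator CRDs are installed, and the ZooKeeper endpoints are placeholders:

    metrics:
      enabled: true
      serviceMonitor:
        enabled: true
        labels:
          release: kube-prometheus-stack    # placeholder Prometheus selector label
    externalZookeeper:
      port: 2181
      servers:
        - zk-0.zk-headless.default.svc.cluster.local
        - zk-1.zk-headless.default.svc.cluster.local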
diff --git a/charts/client/Chart.yaml b/charts/client/Chart.yaml
index 58ebc2dc..d7a05d6a 100644
--- a/charts/client/Chart.yaml
+++ b/charts/client/Chart.yaml
@@ -15,7 +15,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 1.1.10
+version: 1.1.11
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
@@ -29,8 +29,8 @@ dependencies:
     repository: https://intelops.github.io/kubviz/
   - name: clickhouse
     condition: clickhouse.enabled
-    version: 1.0.2
-    repository: https://intelops.github.io/kubviz/
+    version: 1.0.0
+    repository: https://kube-tarian.github.io/helmrepo-supporting-tools/
   - name: grafana
     condition: grafana.enabled
     version: 1.0.5
diff --git a/charts/client/templates/configmap-clickhouse-datasource.yaml b/charts/client/templates/configmap-clickhouse-datasource.yaml
index ddcc43d9..4052616a 100644
--- a/charts/client/templates/configmap-clickhouse-datasource.yaml
+++ b/charts/client/templates/configmap-clickhouse-datasource.yaml
@@ -15,10 +15,13 @@ data:
         port: 9000
         {{- if .Values.clickhouse.enabled }}
         server: {{ include "client.fullname" . }}-clickhouse
+        tlsSkipVerify: true
+        username: {{ .Values.clickhouse.user }}
+        secureJsonData:
+          password: {{ .Values.clickhouse.password }}
         {{- else }}
         server: {{ .Values.existingClickhouse.host }}
         tlsSkipVerify: true
-        {{- if not .Values.clickhouse.enabled }}
         {{- if not .Values.existingClickhouse.secret }}
         username: {{ .Values.existingClickhouse.username }}
         {{- else }}
@@ -30,6 +33,5 @@ data:
         {{- else }}
         password: $CLICKHOUSE_PASSWORD
         {{- end }}
-        {{- end }}
         {{- end }}
 {{- end }}
\ No newline at end of file
diff --git a/charts/client/templates/configmap-vertamedia-datasource.yaml b/charts/client/templates/configmap-vertamedia-datasource.yaml
index 983a15ee..627f1dc6 100644
--- a/charts/client/templates/configmap-vertamedia-datasource.yaml
+++ b/charts/client/templates/configmap-vertamedia-datasource.yaml
@@ -13,11 +13,15 @@ data:
         type: vertamedia-clickhouse-datasource
         {{- if .Values.clickhouse.enabled }}
         url: {{ include "client.fullname" . }}-clickhouse:8123
+        access: proxy
+        basicAuth: true
+        basicAuthUser: {{ .Values.clickhouse.user }}
+        secureJsonData:
+          basicAuthPassword: {{ .Values.clickhouse.password }}
         {{- else }}
         url: {{ .Values.existingClickhouse.host }}:8123
         access: proxy
-        {{- if not .Values.clickhouse.enabled }}
-        basicAuth: true
+        basicAuth: true
         {{- if not .Values.existingClickhouse.secret }}
         basicAuthUser: {{ .Values.existingClickhouse.username }}
         {{- else }}
@@ -29,6 +33,5 @@ data:
         {{- else }}
         basicAuthPassword: $CLICKHOUSE_PASSWORD
         {{- end }}
-        {{- end }}
         {{- end }}
 {{- end }}
\ No newline at end of file
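With clickhouse.enabled set to true, the new fields feed Grafana's datasource provisioning straight from the subchart credentials; a rough sketch of the resulting entry, assuming the keys sit under jsonData/secureJsonData as in standard Grafana provisioning and using the placeholder release name my-release (the exact nesting depends on parts of the template not shown in these hunks):

    jsonData:
      port: 9000
      server: my-release-clickhouse
      tlsSkipVerify: true
      username: admin
    secureJsonData:
      password: admin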
diff --git a/charts/client/templates/deployment.yaml b/charts/client/templates/deployment.yaml
index bce647bd..64075db4 100644
--- a/charts/client/templates/deployment.yaml
+++ b/charts/client/templates/deployment.yaml
@@ -39,10 +39,15 @@ spec:
           env:
            - name: SCHEMA_PATH
              value : {{ .Values.migration.schema.path }}
-           - name: DB_ADDRESS
           {{- if .Values.clickhouse.enabled }}
+           - name: DB_ADDRESS
             value: {{ include "client.fullname" . }}-clickhouse
+           - name: CLICKHOUSE_USERNAME
+             value: {{ .Values.clickhouse.user }}
+           - name: CLICKHOUSE_PASSWORD
+             value: {{ .Values.clickhouse.password }}
           {{- else }}
+           - name: DB_ADDRESS
             value: {{ .Values.existingClickhouse.host }}
            - name: CLICKHOUSE_USERNAME
           {{- if not .Values.existingClickhouse.secret }}
@@ -99,10 +104,15 @@ spec:
           {{- end }}
            - name: NATS_ADDRESS
              value: {{ include "client.fullname" . }}-nats
-           - name: DB_ADDRESS
           {{- if .Values.clickhouse.enabled }}
+           - name: DB_ADDRESS
             value: {{ include "client.fullname" . }}-clickhouse
+           - name: CLICKHOUSE_USERNAME
+             value: {{ .Values.clickhouse.user }}
+           - name: CLICKHOUSE_PASSWORD
+             value: {{ .Values.clickhouse.password }}
           {{- else }}
+           - name: DB_ADDRESS
             value: {{ .Values.existingClickhouse.host }}
            - name: CLICKHOUSE_USERNAME
           {{- if not .Values.existingClickhouse.secret }}
diff --git a/charts/client/values.yaml b/charts/client/values.yaml
index f612cb05..aa5f94b3 100644
--- a/charts/client/values.yaml
+++ b/charts/client/values.yaml
@@ -61,10 +61,11 @@ resources:
   limits:
     cpu: 200m
     memory: 256Mi
-    ephemeral-storage: 100Mi
+    ephemeral-storage: 50Mi
   requests:
     cpu: 100m
     memory: 128Mi
+    ephemeral-storage: 50Mi
 
 autoscaling:
   enabled: false
@@ -96,8 +97,9 @@ nats:
 
 clickhouse:
   enabled: true
-  clickhouse:
-    replicas: "1"
+  user: admin
+  password: admin
+  replicasCount: 1
 
 existingClickhouse:
   host: clickhouse
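The replacement keys give the kubviz client its ClickHouse credentials straight from values; a sketch of an override using only keys introduced above, with placeholder credentials (the shipped admin/admin defaults are worth changing in any real install):

    clickhouse:
      enabled: true
      user: kubviz
      password: change-me-strong-password
      replicasCount: 1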
From 59568dc7dea8c44e08d41b1a7d79a0db25fe4f24 Mon Sep 17 00:00:00 2001
From: Akash LM
Date: Mon, 11 Dec 2023 13:19:35 +0530
Subject: [PATCH 6/6] Add persistence support in Agent

---
 .github/workflows/helm_release.yml     |  1 -
 charts/agent/Chart.yaml                |  2 +-
 charts/agent/templates/deployment.yaml | 60 ++++++++++++++++++++++++--
 charts/agent/templates/pvc.yaml        | 13 ++++++
 charts/agent/values.yaml               | 31 +++++++++++--
 5 files changed, 99 insertions(+), 8 deletions(-)
 create mode 100644 charts/agent/templates/pvc.yaml

diff --git a/.github/workflows/helm_release.yml b/.github/workflows/helm_release.yml
index f16b4250..597ddaf8 100644
--- a/.github/workflows/helm_release.yml
+++ b/.github/workflows/helm_release.yml
@@ -23,7 +23,6 @@ jobs:
       - name: Add Helm repos
         run: |
           helm repo add tools https://kube-tarian.github.io/helmrepo-supporting-tools
-          helm repo add bitnami https://charts.bitnami.com/bitnami
 
       - name: Run chart-releaser
         uses: helm/chart-releaser-action@v1.1.0
diff --git a/charts/agent/Chart.yaml b/charts/agent/Chart.yaml
index 9133af69..dfadb805 100644
--- a/charts/agent/Chart.yaml
+++ b/charts/agent/Chart.yaml
@@ -15,7 +15,7 @@ type: application
 # This is the chart version. This version number should be incremented each time you make changes
 # to the chart and its templates, including the app version.
 # Versions are expected to follow Semantic Versioning (https://semver.org/)
-version: 1.1.7
+version: 1.1.8
 
 # This is the version number of the application being deployed. This version number should be
 # incremented each time you make changes to the application. Versions are not expected to
diff --git a/charts/agent/templates/deployment.yaml b/charts/agent/templates/deployment.yaml
index 1ce92e1d..05a8abd3 100644
--- a/charts/agent/templates/deployment.yaml
+++ b/charts/agent/templates/deployment.yaml
@@ -75,8 +75,24 @@ spec:
             value: "{{ .Values.schedule.kubepreupgradeInterval }}"
           - name: TRIVY_INTERVAL
             value: "{{ .Values.schedule.trivyInterval }}"
+          {{- if .Values.persistence.enabled }}
+          volumeMounts:
+            - name: data
+              mountPath: {{ .Values.persistence.mountPath }}
+          {{- end }}
          resources:
-            {{- toYaml .Values.resources | nindent 12 }}
+            limits:
+              cpu: {{ .Values.resources.limits.cpu }}
+              memory: {{ .Values.resources.limits.memory }}
+              {{- if not .Values.persistence.enabled }}
+              ephemeral-storage: {{ .Values.resources.limits.ephemeralstorage }}
+              {{- end }}
+            requests:
+              cpu: {{ .Values.resources.requests.cpu }}
+              memory: {{ .Values.resources.requests.memory }}
+              {{- if not .Values.persistence.enabled }}
+              ephemeral-storage: {{ .Values.resources.requests.ephemeralstorage }}
+              {{- end }}
         {{- if .Values.git_bridge.enabled }}
         - name: git-bridge
           image: "{{ .Values.git_bridge.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
@@ -99,8 +115,24 @@ spec:
          {{- end }}
           - name: NATS_ADDRESS
             value: {{ .Values.nats.host }}
+          {{- if .Values.git_bridge.persistence.enabled }}
+          volumeMounts:
+            - name: data
+              mountPath: {{ .Values.git_bridge.persistence.mountPath }}
+          {{- end }}
          resources:
-            {{- toYaml .Values.git_bridge.resources | nindent 12 }}
+            limits:
+              cpu: {{ .Values.git_bridge.resources.limits.cpu }}
+              memory: {{ .Values.git_bridge.resources.limits.memory }}
+              {{- if not .Values.git_bridge.persistence.enabled }}
+              ephemeral-storage: {{ .Values.git_bridge.resources.limits.ephemeralstorage }}
+              {{- end }}
+            requests:
+              cpu: {{ .Values.git_bridge.resources.requests.cpu }}
+              memory: {{ .Values.git_bridge.resources.requests.memory }}
+              {{- if not .Values.git_bridge.persistence.enabled }}
+              ephemeral-storage: {{ .Values.git_bridge.resources.requests.ephemeralstorage }}
+              {{- end }}
         {{- end }}
         {{- if .Values.container_bridge.enabled }}
         - name: container-bridge
@@ -124,9 +156,31 @@ spec:
          {{- end }}
           - name: NATS_ADDRESS
             value: {{ .Values.nats.host }}
+          {{- if .Values.container_bridge.persistence.enabled }}
+          volumeMounts:
+            - name: data
+              mountPath: {{ .Values.container_bridge.persistence.mountPath }}
+          {{- end }}
          resources:
-            {{- toYaml .Values.container_bridge.resources | nindent 12 }}
+            limits:
+              cpu: {{ .Values.container_bridge.resources.limits.cpu }}
+              memory: {{ .Values.container_bridge.resources.limits.memory }}
+              {{- if not .Values.container_bridge.persistence.enabled }}
+              ephemeral-storage: {{ .Values.container_bridge.resources.limits.ephemeralstorage }}
+              {{- end }}
+            requests:
+              cpu: {{ .Values.container_bridge.resources.requests.cpu }}
+              memory: {{ .Values.container_bridge.resources.requests.memory }}
+              {{- if not .Values.container_bridge.persistence.enabled }}
+              ephemeral-storage: {{ .Values.container_bridge.resources.requests.ephemeralstorage }}
+              {{- end }}
         {{- end }}
+      {{- if .Values.persistence.enabled }}
+      volumes:
+        - name: data
+          persistentVolumeClaim:
+            claimName: {{ include "agent.fullname" . }}-data
+      {{- end }}
       {{- with .Values.nodeSelector }}
       nodeSelector:
         {{- toYaml . | nindent 8 }}
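Under the default values that follow, the main agent container would render roughly like the fragment below when persistence is left enabled; the ephemeral-storage entries drop out because their condition is false. This is a sketch of the template output, not a verbatim manifest:

    volumeMounts:
      - name: data
        mountPath: /mnt/agent/kbz
    resources:
      limits:
        cpu: 2
        memory: 2Gi
      requests:
        cpu: 200m
        memory: 256Mi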
diff --git a/charts/agent/templates/pvc.yaml b/charts/agent/templates/pvc.yaml
new file mode 100644
index 00000000..920c3dab
--- /dev/null
+++ b/charts/agent/templates/pvc.yaml
@@ -0,0 +1,13 @@
+{{- if .Values.persistence.enabled }}
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: {{ include "agent.fullname" . }}-data
+spec:
+  accessModes:
+    - {{ .Values.persistence.accessMode }}
+  storageClassName: {{ .Values.persistence.storageClass | default "" }}
+  resources:
+    requests:
+      storage: {{ .Values.persistence.size }}
+{{- end }}
\ No newline at end of file
diff --git a/charts/agent/values.yaml b/charts/agent/values.yaml
index 7613d4ca..48aa6710 100644
--- a/charts/agent/values.yaml
+++ b/charts/agent/values.yaml
@@ -52,10 +52,18 @@ git_bridge:
     limits:
       cpu: 200m
       memory: 256Mi
-      ephemeral-storage: 100Mi
+      ephemeralstorage: 100Mi
     requests:
       cpu: 200m
       memory: 256Mi
+      ephemeralstorage: 100Mi
+  persistence:
+    enabled: true
+    existingClaim: ""
+    storageClass: ""
+    mountPath: /mnt/agent/gb
+    accessMode: ReadWriteOnce
+    size: 5Gi
   ingress:
     enabled: true
     annotations:
@@ -87,10 +95,18 @@ container_bridge:
     limits:
       cpu: 200m
       memory: 256Mi
-      ephemeral-storage: 100Mi
+      ephemeralstorage: 100Mi
     requests:
       cpu: 200m
       memory: 256Mi
+      ephemeralstorage: 100Mi
+  persistence:
+    enabled: true
+    existingClaim: ""
+    storageClass: ""
+    mountPath: /mnt/agent/cb
+    accessMode: ReadWriteOnce
+    size: 5Gi
   ingress:
     enabled: true
     annotations:
@@ -127,10 +143,19 @@ resources:
   limits:
     cpu: 2
     memory: 2Gi
-    ephemeral-storage: 1Gi
+    ephemeralstorage: 1Gi
   requests:
     cpu: 200m
    memory: 256Mi
+    ephemeralstorage: 256Mi
+
+persistence:
+  enabled: true
+  existingClaim: ""
+  storageClass: ""
+  mountPath: /mnt/agent/kbz
+  accessMode: ReadWriteOnce
+  size: 5Gi
 
 autoscaling:
   enabled: false
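Conversely, on clusters without a usable default StorageClass the new claims can simply be switched off, which restores the previous ephemeral-storage behaviour; a minimal override sketch using only keys introduced in this patch:

    persistence:
      enabled: false
    git_bridge:
      persistence:
        enabled: false
    container_bridge:
      persistence:
        enabled: false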