diff --git a/cluster/manifests/freeleaps-data-platform/flink/values.yaml b/cluster/manifests/freeleaps-data-platform/flink/values.yaml new file mode 100644 index 00000000..fc317636 --- /dev/null +++ b/cluster/manifests/freeleaps-data-platform/flink/values.yaml @@ -0,0 +1,897 @@ +# Copyright Broadcom, Inc. All Rights Reserved. +# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + defaultStorageClass: "" + storageClass: "azure-disk-std-lrs" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto +## @section Common parameters +## + +## @param nameOverride String to partially override common.names.fullname +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname +## +fullnameOverride: "" +## @param kubeVersion Force target Kubernetes version (using Helm capabilities if not set) +## +kubeVersion: "" +## @param commonLabels Labels to add to all deployed objects (sub-charts are not considered) +## +commonLabels: {} +## @param commonAnnotations Annotations to add to all deployed objects +## +commonAnnotations: {} +## @param clusterDomain Default Kubernetes cluster domain +## +clusterDomain: freeleaps.cluster +## @param extraDeploy Array of extra objects to deploy with the release +## +extraDeploy: [] +## Enable diagnostic mode in the deployment +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the deployment + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the deployment + ## + args: + - infinity +## @section Apache Flink parameters +## + +## Bitnami Apache Flink image +## ref: https://hub.docker.com/r/bitnami/flink/tags/ +## @param image.registry [default: REGISTRY_NAME] Apache Flink image registry +## @param image.repository [default: REPOSITORY_NAME/flink] Apache Flink image repository +## @skip image.tag Apache Flink image tag (immutable tags are recommended) +## @param image.digest Apache Flink image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag +## @param image.pullPolicy image pull policy +## @param image.pullSecrets Apache Flink image pull secrets +## @param image.debug Enable image debug mode +## +image: + registry: docker.io + repository: bitnami/flink + tag: 2.0.0-debian-12-r7 + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false +## @section Jobmanager deployment parameters +## +jobmanager: + ## @param jobmanager.command Command for running the container (set to default if not set). Use array form + ## + command: [] + ## @param jobmanager.args Args for running the container (set to default if not set). 
Use array form + ## + args: [] + ## @param jobmanager.lifecycleHooks [object] Override default etcd container hooks + ## + lifecycleHooks: {} + ## @param jobmanager.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param jobmanager.hostAliases Set pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param jobmanager.extraEnvVars Extra environment variables to be set on flink container + ## For example: + ## - name: FOO + ## value: BAR + ## + extraEnvVars: [] + ## @param jobmanager.extraEnvVarsCM Name of existing ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param jobmanager.extraEnvVarsSecret Name of existing Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param jobmanager.replicaCount Number of Apache Flink Jobmanager replicas + ## + replicaCount: 1 + ## Configure extra options for container's liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param jobmanager.livenessProbe.enabled Enable livenessProbe on Jobmanager nodes + ## @param jobmanager.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param jobmanager.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param jobmanager.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param jobmanager.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param jobmanager.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ## @param jobmanager.startupProbe.enabled Enable startupProbe on Jobmanager containers + ## @param 
jobmanager.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param jobmanager.startupProbe.periodSeconds Period seconds for startupProbe + ## @param jobmanager.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param jobmanager.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param jobmanager.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param jobmanager.readinessProbe.enabled Enable readinessProbe + ## @param jobmanager.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param jobmanager.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param jobmanager.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param jobmanager.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param jobmanager.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param jobmanager.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param jobmanager.customStartupProbe [object] Override default startup probe + ## + customStartupProbe: {} + ## @param jobmanager.customReadinessProbe [object] Override default readiness probe + ## + customReadinessProbe: {} + ## Apache Flink pods' resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## Minimum memory for development is 4GB and 2 CPU cores + ## Minimum memory for production is 8GB and 4 CPU cores + ## ref: http://docs.datastax.com/en/archived/flink/2.0/flink/architecture/architecturePlanningHardware_c.html + ## + ## We 
usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param jobmanager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if jobmanager.resources is set (jobmanager.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param jobmanager.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: + requests: + cpu: 200m + memory: 1Gi + limits: + cpu: 500m + memory: 2Gi + ## @param jobmanager.extraVolumeMounts Optionally specify extra list of additional volumeMounts for flink container + ## + extraVolumeMounts: [] + ## Container ports to expose + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + containerPorts: + ## @param jobmanager.containerPorts.rpc Port for RPC + ## + rpc: 6123 + ## @param jobmanager.containerPorts.http Port for http UI + ## + http: 8081 + ## @param jobmanager.containerPorts.blob Port for blob server + ## + blob: 6124 + ## Apache Flink jobmanager.service parameters + ## + service: + ## @param jobmanager.service.type Apache Flink service type + ## + type: ClusterIP + ## Ports to expose + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ports: + ## @param jobmanager.service.ports.rpc Port for RPC + ## + rpc: 6123 + ## 
@param jobmanager.service.ports.http Port for http UI + ## + http: 8081 + ## @param jobmanager.service.ports.blob Port for blob server + ## Due the Apache Flink specificities this port should match the jobmanager.containerPorts.blob port. The taskmanager should be + ## able to communicate with the jobmanager through the port jobmanager indicates to the taskmanager, being the jobmanager not aware of the service port. + blob: 6124 + ## Node ports to expose + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + nodePorts: + ## @param jobmanager.service.nodePorts.rpc Node port for RPC + ## + rpc: "" + ## @param jobmanager.service.nodePorts.http Node port for http UI + ## + http: "" + ## @param jobmanager.service.nodePorts.blob Port for blob server + ## + blob: "" + ## @param jobmanager.service.extraPorts Extra ports to expose in the service (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param jobmanager.service.loadBalancerIP LoadBalancerIP if service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param jobmanager.service.loadBalancerSourceRanges Service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param jobmanager.service.clusterIP Service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param jobmanager.service.externalTrafficPolicy Service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param jobmanager.service.annotations Provide any additional annotations which may be required. 
+ ## This can be used to set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param jobmanager.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param jobmanager.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param jobmanager.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param jobmanager.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param jobmanager.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param jobmanager.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param jobmanager.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param jobmanager.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param jobmanager.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + ## Apache Flink Jobmanager serviceAccount parameters + ## + serviceAccount: + ## @param jobmanager.serviceAccount.create Enables ServiceAccount + ## + create: true + ## @param jobmanager.serviceAccount.name ServiceAccount name + ## + name: "" + ## @param jobmanager.serviceAccount.annotations Annotations to add to all deployed objects + ## + annotations: {} + ## @param jobmanager.serviceAccount.automountServiceAccountToken Automount API credentials for a service account. 
+ ## + automountServiceAccountToken: false + ## Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param jobmanager.podSecurityContext.enabled Enabled Apache Flink pods' Security Context + ## @param jobmanager.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param jobmanager.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param jobmanager.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param jobmanager.podSecurityContext.fsGroup Set Apache Flink pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context (only main container) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param jobmanager.containerSecurityContext.enabled Enabled Apache Flink containers' Security Context + ## @param jobmanager.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param jobmanager.containerSecurityContext.runAsUser Set Apache Flink container's Security Context runAsUser + ## @param jobmanager.containerSecurityContext.runAsGroup Set Apache Flink container's Security Context runAsGroup + ## @param jobmanager.containerSecurityContext.runAsNonRoot Force the container to be run as non root + ## @param jobmanager.containerSecurityContext.allowPrivilegeEscalation Allows privilege escalation + ## @param jobmanager.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param jobmanager.containerSecurityContext.privileged Set primary container's Security Context privileged + ## @param jobmanager.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param 
jobmanager.containerSecurityContext.seccompProfile.type Rules specifying actions to take based on the requested syscall + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + privileged: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param jobmanager.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param jobmanager.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param jobmanager.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param jobmanager.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param jobmanager.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param jobmanager.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set + ## + key: "" + ## @param jobmanager.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param jobmanager.priorityClassName Server priorityClassName + ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param jobmanager.affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param jobmanager.nodeSelector Node labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param jobmanager.tolerations Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param jobmanager.topologySpreadConstraints Topology Spread Constraints for pod assignment + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## The value is evaluated as a template + ## + topologySpreadConstraints: [] + ## @param jobmanager.schedulerName Alternative scheduler + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param jobmanager.updateStrategy.type Apache Flink jobmanager deployment strategy type + ## @param jobmanager.updateStrategy.rollingUpdate [object,nullable] Apache Flink jobmanager deployment rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: null + ## @param jobmanager.extraVolumes Optionally specify extra list of additional volumes for flink container + ## + extraVolumes: [] + ## @param jobmanager.initContainers Add additional init containers to the flink pods + ## + initContainers: [] + ## @param jobmanager.sidecars Add 
additional sidecar containers to the flink pods + ## + sidecars: [] + + ## @param jobmanager.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param jobmanager.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param jobmanager.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable.Defaults to `1` if both `secondary.pdb.minAvailable` and `secondary.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" +## @section TaskManager deployment parameters +## +taskmanager: + ## @param taskmanager.command Command for running the container (set to default if not set). Use array form + ## + command: [] + ## @param taskmanager.args Args for running the container (set to default if not set). Use array form + ## + args: [] + ## @param taskmanager.lifecycleHooks [object] Override default etcd container hooks + ## + lifecycleHooks: {} + ## @param taskmanager.automountServiceAccountToken Mount Service Account token in pod + ## + automountServiceAccountToken: false + ## @param taskmanager.hostAliases Set pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param taskmanager.extraEnvVars Extra environment variables to be set on flink container + ## For example: + ## - name: FOO + ## value: BAR + ## + extraEnvVars: [] + ## @param taskmanager.extraEnvVarsCM Name of existing ConfigMap containing extra env vars + ## + extraEnvVarsCM: "" + ## @param taskmanager.extraEnvVarsSecret Name of existing Secret containing extra env vars + ## + extraEnvVarsSecret: "" + ## @param taskmanager.replicaCount Number of Apache Flink replicas + ## + replicaCount: 1 + ## Configure extra options for container's liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param 
taskmanager.livenessProbe.enabled Enable livenessProbe on taskmanager nodes + ## @param taskmanager.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param taskmanager.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param taskmanager.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param taskmanager.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param taskmanager.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 20 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ## @param taskmanager.startupProbe.enabled Enable startupProbe on taskmanager containers + ## @param taskmanager.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param taskmanager.startupProbe.periodSeconds Period seconds for startupProbe + ## @param taskmanager.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param taskmanager.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param taskmanager.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 + ## @param taskmanager.readinessProbe.enabled Enable readinessProbe + ## @param taskmanager.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param taskmanager.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param taskmanager.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param taskmanager.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param taskmanager.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 20 + periodSeconds: 10 + timeoutSeconds: 1 + 
failureThreshold: 15 + successThreshold: 1 + ## @param taskmanager.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param taskmanager.customStartupProbe [object] Override default startup probe + ## + customStartupProbe: {} + ## @param taskmanager.customReadinessProbe [object] Override default readiness probe + ## + customReadinessProbe: {} + ## Apache Flink pods' resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## Minimum memory for development is 4GB and 2 CPU cores + ## Minimum memory for production is 8GB and 4 CPU cores + ## ref: http://docs.datastax.com/en/archived/flink/2.0/flink/architecture/architecturePlanningHardware_c.html + ## + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## @param taskmanager.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if taskmanager.resources is set (taskmanager.resources is recommended for production). 
+ ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "small" + ## @param taskmanager.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: + requests: + cpu: 200m + memory: 1Gi + limits: + cpu: 500m + memory: 2Gi + ## @param taskmanager.extraVolumeMounts Optionally specify extra list of additional volumeMounts for flink container + ## + extraVolumeMounts: [] + ## Container ports to expose + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## @param taskmanager.containerPorts.data data exchange port + ## @param taskmanager.containerPorts.rpc Port for RPC + ## @param taskmanager.containerPorts.internalMetrics Port for internal metrics query service + ## + containerPorts: + data: 6121 + rpc: 6122 + internalMetrics: 6126 + ## Apache Flink taskmanager.service parameters + ## + service: + ## @param taskmanager.service.type Apache Flink service type + ## + type: ClusterIP + ## Ports to expose + ## @param taskmanager.service.ports.data data exchange port + ## @param taskmanager.service.ports.rpc Port for RPC + ## @param taskmanager.service.ports.internalMetrics Port for internal metrics query service + ## + ports: + data: 6121 + rpc: 6122 + internalMetrics: 6126 + ## Node ports to expose + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## @param taskmanager.service.nodePorts.data data exchange port + ## @param taskmanager.service.nodePorts.rpc Port for RPC + ## @param taskmanager.service.nodePorts.internalMetrics Port for internal metrics query service + ## + nodePorts: + data: "" + rpc: "" + internalMetrics: "" + ## @param taskmanager.service.extraPorts Extra ports to expose in the service (normally used 
with the `sidecar` value) + ## + extraPorts: [] + ## @param taskmanager.service.loadBalancerIP LoadBalancerIP if service type is `LoadBalancer` + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param taskmanager.service.loadBalancerSourceRanges Service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param taskmanager.service.clusterIP Service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param taskmanager.service.externalTrafficPolicy Service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param taskmanager.service.annotations Provide any additional annotations which may be required. + ## This can be used to set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + ## @param taskmanager.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" + ## If "ClientIP", consecutive client requests will be directed to the same Pod + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies + ## + sessionAffinity: None + ## @param taskmanager.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## Network Policies + ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param taskmanager.networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param taskmanager.networkPolicy.allowExternal Don't require server label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## server label will have network access to the ports server is listening + ## on. When true, server will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + ## @param taskmanager.networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. 
+ ## + allowExternalEgress: true + ## @param taskmanager.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param taskmanager.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param taskmanager.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param taskmanager.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + ## Apache Flink taskmanager serviceAccount parameters + ## + serviceAccount: + ## @param taskmanager.serviceAccount.create Enables ServiceAccount + ## + create: true + ## @param taskmanager.serviceAccount.name ServiceAccount name + ## + name: "" + ## @param taskmanager.serviceAccount.annotations Annotations to add to all deployed objects + ## + annotations: {} + ## @param taskmanager.serviceAccount.automountServiceAccountToken Automount API credentials for a service account. 
+ ## + automountServiceAccountToken: false + ## Pod security context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param taskmanager.podSecurityContext.enabled Enabled Apache Flink pods' Security Context + ## @param taskmanager.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy + ## @param taskmanager.podSecurityContext.sysctls Set kernel settings using the sysctl interface + ## @param taskmanager.podSecurityContext.supplementalGroups Set filesystem extra groups + ## @param taskmanager.podSecurityContext.fsGroup Set Apache Flink pod's Security Context fsGroup + ## + podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 + ## Configure Container Security Context (only main container) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param taskmanager.containerSecurityContext.enabled Enabled Apache Flink containers' Security Context + ## @param taskmanager.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param taskmanager.containerSecurityContext.runAsUser Set Apache Flink container's Security Context runAsUser + ## @param taskmanager.containerSecurityContext.runAsGroup Set Apache Flink container's Security Context runAsGroup + ## @param taskmanager.containerSecurityContext.runAsNonRoot Force the container to be run as non root + ## @param taskmanager.containerSecurityContext.privileged Set primary container's Security Context privileged + ## @param taskmanager.containerSecurityContext.allowPrivilegeEscalation Allows privilege escalation + ## @param taskmanager.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param taskmanager.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## 
@param taskmanager.containerSecurityContext.seccompProfile.type Rules specifying actions to take based on the requested syscall + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" + ## @param taskmanager.podAnnotations Additional pod annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param taskmanager.podLabels Additional pod labels + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param taskmanager.podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param taskmanager.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node affinity preset + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param taskmanager.nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param taskmanager.nodeAffinityPreset.key Node label key to match. Ignored if `affinity` is set + ## + key: "" + ## @param taskmanager.nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param taskmanager.priorityClassName Server priorityClassName + ## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ + ## + priorityClassName: "" + ## @param taskmanager.affinity Affinity for pod assignment + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set + ## + affinity: {} + ## @param taskmanager.nodeSelector Node labels for pod assignment + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + ## @param taskmanager.tolerations Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param taskmanager.topologySpreadConstraints Topology Spread Constraints for pod assignment + ## https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## The value is evaluated as a template + ## + topologySpreadConstraints: [] + ## @param taskmanager.schedulerName Alternative scheduler + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param taskmanager.podManagementPolicy Pod management policy for the Apache Flink taskmanager statefulset + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies + ## + podManagementPolicy: Parallel + ## @param taskmanager.updateStrategy.type Apache Flink taskmanager statefulset strategy type + ## @param taskmanager.updateStrategy.rollingUpdate [object,nullable] Apache Flink taskmanager statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: null + ## @param 
taskmanager.extraVolumes Optionally specify extra list of additional volumes for flink container + ## + extraVolumes: [] + ## @param taskmanager.initContainers Add additional init containers to the flink pods + ## + initContainers: [] + ## @param taskmanager.sidecars Add additional sidecar containers to the flink pods + ## + sidecars: [] + ## @param taskmanager.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param taskmanager.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param taskmanager.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable.Defaults to `1` if both `secondary.pdb.minAvailable` and `secondary.pdb.maxUnavailable` are empty. + ## + pdb: + create: true + minAvailable: "" + maxUnavailable: "" diff --git a/cluster/manifests/freeleaps-data-platform/kafka/values.yaml b/cluster/manifests/freeleaps-data-platform/kafka/values.yaml index 5829b3fd..0fb72ed6 100644 --- a/cluster/manifests/freeleaps-data-platform/kafka/values.yaml +++ b/cluster/manifests/freeleaps-data-platform/kafka/values.yaml @@ -37,10 +37,11 @@ global: ## @param kubeVersion Override Kubernetes version ## -kubeVersion: "" +kubeVersion: "1.31.4" ## @param apiVersions Override Kubernetes API versions reported by .Capabilities ## -apiVersions: [] +apiVersions: + - "autoscaling.k8s.io/v1" ## @param nameOverride String to partially override common.names.fullname ## nameOverride: "" @@ -998,7 +999,7 @@ controller: vpa: ## @param controller.autoscaling.vpa.enabled Enable VPA ## - enabled: false + enabled: true ## @param controller.autoscaling.vpa.annotations Annotations for VPA resource ## annotations: {} @@ -1008,11 +1009,15 @@ controller: ## @param controller.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod ## cpu: 200m ## memory: 100Mi - maxAllowed: {} + maxAllowed: + cpu: 1000m + memory: 2048Mi ## @param controller.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod ## cpu: 200m ## 
memory: 100Mi - minAllowed: {} + minAllowed: + cpu: 500m + memory: 1024Mi updatePolicy: ## @param controller.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod ## Possible values are "Off", "Initial", "Recreate", and "Auto". diff --git a/cluster/manifests/freeleaps-data-platform/kafka/vpa.yaml b/cluster/manifests/freeleaps-data-platform/kafka/vpa.yaml new file mode 100644 index 00000000..ed64fbdb --- /dev/null +++ b/cluster/manifests/freeleaps-data-platform/kafka/vpa.yaml @@ -0,0 +1,24 @@ +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: kafka-controller-vpa + namespace: freeleaps-data-platform +spec: + resourcePolicy: + containerPolicies: + - containerName: '*' + controlledResources: + - cpu + - memory + maxAllowed: + cpu: 1000m + memory: 2048Mi + minAllowed: + cpu: 200m + memory: 512Mi + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: kafka-controller + updatePolicy: + updateMode: "Auto" \ No newline at end of file diff --git a/cluster/manifests/freeleaps-data-platform/pinot/certificate.yaml b/cluster/manifests/freeleaps-data-platform/pinot/certificate.yaml deleted file mode 100644 index 0942c51c..00000000 --- a/cluster/manifests/freeleaps-data-platform/pinot/certificate.yaml +++ /dev/null @@ -1,13 +0,0 @@ -apiVersion: cert-manager.io/v1 -kind: Certificate -metadata: - name: pinot-dot-mathmast-dot-com - namespace: freeleaps-data-platform -spec: - commonName: pinot.mathmast.com - dnsNames: - - pinot.mathmast.com - issuerRef: - kind: ClusterIssuer - name: mathmast-dot-com - secretName: pinot-dot-mathmast-dot-com-tls \ No newline at end of file diff --git a/cluster/manifests/freeleaps-data-platform/pinot/values.yaml b/cluster/manifests/freeleaps-data-platform/pinot/values.yaml index 48aa8e6b..ecfc7ded 100644 --- 
a/cluster/manifests/freeleaps-data-platform/pinot/values.yaml +++ b/cluster/manifests/freeleaps-data-platform/pinot/values.yaml @@ -19,326 +19,110 @@ # Default values for Pinot. -namespaceOverride: -namespaceAnnotations: {} - image: repository: apachepinot/pinot - # Pinot docker images are available at https://hub.docker.com/r/apachepinot/pinot/tags - # - `latest` tag is always available and points to the nightly pinot master branch build - # - `release-x.y.z` or `x.y.z` tags are available for each release, e.g. release-1.0.0, release-0.12.1, 1.0.0, 0.12.1, etc. - # - # Default JDK comes with Amazon Corretto 11, here are also images with different JDKs: - # - Amazon Corretto 11, e.g. `latest-11`, `1.0.0-11`, `latest-11-amazoncorretto`, `1.0.0-11-amazoncorretto` - # - Amazon Corretto 17, e.g. `latest-17-amazoncorretto`, `1.0.0-17-amazoncorretto` - # - MS OpenJDK 11, e.g. `latest-11-ms-openjdk`, `1.0.0-11-ms-openjdk` - # - MS OpenJDK 17, e.g. `latest-17-ms-openjdk`, `1.0.0-17-ms-openjdk` - # - OpenJDK 21, e.g. `latest-21-openjdk`, `1.0.0-21-openjdk` - tag: latest # 1.0.0, 0.12.1, latest - pullPolicy: Always # Use IfNotPresent when you pinged a version of image tag + tag: 1.3.0 + pullPolicy: IfNotPresent cluster: - name: freeleaps + name: freeleaps-pinot -imagePullSecrets: [] - -terminationGracePeriodSeconds: 30 - -securityContext: {} - # capabilities: - # drop: - # - ALL - # readOnlyRootFilesystem: true - # runAsNonRoot: true - # runAsUser: 1000 - -# default values of the probes i.e. liveness and readiness. -# customization of values is present at the component level. -probes: - initialDelaySeconds: 60 - periodSeconds: 10 - failureThreshold: 10 - # should be 1 for liveness and startup probe, as per K8s doc. - successThreshold: 1 - timeoutSeconds: 10 - -serviceAccount: - # Specifies whether a service account should be created - create: true - # Annotations to add to the service account - annotations: {} - # The name of the service account to use. 
- # If not set and create is true, a name is generated using the fullname template - name: "" - -additionalMatchLabels: {} - - -pinotAuth: - enabled: false - controllerFactoryClass: org.apache.pinot.controller.api.access.BasicAuthAccessControlFactory - brokerFactoryClass: org.apache.pinot.broker.broker.BasicAuthAccessControlFactory - configs: - # - access.control.principals=admin,user - # - access.control.principals.admin.password=verysecret - # - access.control.principals.user.password=secret - # - access.control.principals.user.tables=baseballStats,otherstuff - # - access.control.principals.user.permissions=READ - -# ------------------------------------------------------------------------------ -# Pinot Controller: -# ------------------------------------------------------------------------------ controller: name: controller - # Controls whether controller.port is included in the configuration. - # Set to false to exclude controller.port when using TLS-only mode or when - # you want to specify the port in controller.access.protocols.https.port instead. 
- configureControllerPort: true + port: 9000 replicaCount: 1 - podManagementPolicy: Parallel - podSecurityContext: {} - # fsGroup: 2000 - securityContext: {} - startCommand: "StartController" - - probes: - endpoint: "/health" - livenessEnabled: false - readinessEnabled: false - startupEnabled: false - liveness: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - readiness: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - startup: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 persistence: enabled: true accessMode: ReadWriteOnce - size: 1G + size: 5G mountPath: /var/pinot/controller/data - storageClass: "" - extraVolumes: [] - extraVolumeMounts: [] + storageClass: "azure-disk-std-lrs" data: dir: /var/pinot/controller/data + vip: - enabled: false host: pinot-controller port: 9000 - jvmOpts: "-XX:ActiveProcessorCount=2 -Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-controller.log -Djute.maxbuffer=4000000" + jvmOpts: "-Xms256M -Xmx1G" - log4j2ConfFile: /opt/pinot/etc/conf/pinot-controller-log4j2.xml + log4j2ConfFile: /opt/pinot/conf/pinot-controller-log4j2.xml pluginsDir: /opt/pinot/plugins - pdb: - enabled: false - minAvailable: "" - maxUnavailable: 50% - service: annotations: {} - clusterIP: "None" + clusterIP: "" externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] type: ClusterIP port: 9000 nodePort: "" - protocol: TCP - name: controller - extraPorts: [] - # - containerPort: 1234 - # protocol: PROTOCOL - # name: extra-port external: - enabled: true + enabled: false type: LoadBalancer port: 9000 - annotations: {} - - ingress: - v1beta1: - enabled: false - annotations: { } - tls: { } - path: / - hosts: [ ] - # port: 9433 - v1: - enabled: false - ingressClassName: "" - annotations: {} - tls: [] - path: / - hosts: [] - # 
port: 9433 resources: requests: - memory: "1.25Gi" + cpu: 200m + memory: 256Mi + limits: + cpu: 500m + memory: 1Gi nodeSelector: {} tolerations: [] - initContainers: [] - affinity: {} podAnnotations: {} - # set enabled as true, to automatically roll controller stateful set for configmap change - automaticReload: - enabled: false - updateStrategy: type: RollingUpdate - # Use envFrom to define all of the ConfigMap or Secret data as container environment variables. - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables - # ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables - envFrom: [] - # - configMapRef: - # name: special-config - # - secretRef: - # name: test-secret - - # Use extraEnv to add individual key value pairs as container environment variables. - # ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ - extraEnv: - - name: LOG4J_CONSOLE_LEVEL - value: info - # - name: PINOT_CUSTOM_ENV - # value: custom-value - - # Extra configs will be appended to pinot-controller.conf file - extra: - configs: |- - pinot.set.instance.id.to.hostname=true - controller.task.scheduler.enabled=true - -# ------------------------------------------------------------------------------ -# Pinot Broker: -# ------------------------------------------------------------------------------ broker: name: broker - # Controls whether pinot.broker.client.queryPort is included in the configuration. - # Set to false to exclude pinot.broker.client.queryPort when using TLS-only mode or when - # you want to specify the port in pinot.broker.access.protocols.https.port instead. 
- configureBrokerPort: true + + port: 8099 + replicaCount: 1 - podManagementPolicy: Parallel - podSecurityContext: {} - # fsGroup: 2000 - securityContext: {} - startCommand: "StartBroker" - jvmOpts: "-XX:ActiveProcessorCount=2 -Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-broker.log -Djute.maxbuffer=4000000" + jvmOpts: "-Xms256M -Xmx1G" - log4j2ConfFile: /opt/pinot/etc/conf/pinot-broker-log4j2.xml + log4j2ConfFile: /opt/pinot/conf/pinot-broker-log4j2.xml pluginsDir: /opt/pinot/plugins routingTable: builderClass: random - probes: - endpoint: "/health" - livenessEnabled: true - readinessEnabled: true - startupEnabled: false - liveness: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - readiness: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - startup: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - persistence: - extraVolumes: [] - extraVolumeMounts: [] - - pdb: - enabled: false - minAvailable: "" - maxUnavailable: 50% - service: annotations: {} - clusterIP: "None" + clusterIP: "" externalIPs: [] loadBalancerIP: "" loadBalancerSourceRanges: [] type: ClusterIP - protocol: TCP port: 8099 - name: broker nodePort: "" - extraPorts: [] - # - containerPort: 1234 - # protocol: PROTOCOL - # name: extra-port external: - enabled: true + enabled: false type: LoadBalancer port: 8099 - # For example, in private GKE cluster, you might add cloud.google.com/load-balancer-type: Internal - annotations: {} - - ingress: - v1beta1: - enabled: false - annotations: {} - tls: {} - path: / - hosts: [] - # port: 8443 - v1: - enabled: false - ingressClassName: "" - annotations: {} - tls: [] - path: / - hosts: [] - # port: 8443 resources: requests: - memory: "1.25Gi" + cpu: 200m + memory: 256Mi + limits: + cpu: 500m + memory: 1Gi nodeSelector: {} @@ 
-346,85 +130,19 @@ broker: tolerations: [] - initContainers: [] - podAnnotations: {} - # set enabled as true, to automatically roll broker stateful set for configmap change - automaticReload: - enabled: false - updateStrategy: type: RollingUpdate - # Use envFrom to define all of the ConfigMap or Secret data as container environment variables. - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables - # ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables - envFrom: [] - # - configMapRef: - # name: special-config - # - secretRef: - # name: test-secret - - # Use extraEnv to add individual key value pairs as container environment variables. - # ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ - extraEnv: - - name: LOG4J_CONSOLE_LEVEL - value: info - # - name: PINOT_CUSTOM_ENV - # value: custom-value - - # Extra configs will be appended to pinot-broker.conf file - extra: - configs: |- - pinot.set.instance.id.to.hostname=true - pinot.query.server.port=7321 - pinot.query.runner.port=7732 - -# ------------------------------------------------------------------------------ -# Pinot Server: -# ------------------------------------------------------------------------------ server: name: server - # Controls whether pinot.server.netty.port is included in the configuration. - # Set to false to exclude pinot.server.netty.port when using TLS-only mode or when - # you want to specify the port in pinot.server.nettytls.port instead. 
- configureServerPort: true + + ports: + netty: 8098 + admin: 8097 + replicaCount: 1 - podManagementPolicy: Parallel - podSecurityContext: {} - # fsGroup: 2000 - securityContext: {} - startCommand: "StartServer" - - probes: - endpoint: "/health" - livenessEnabled: false - readinessEnabled: false - startupEnabled: false - liveness: - endpoint: "/health/liveness" - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - readiness: - endpoint: "/health/readiness" - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - startup: - endpoint: "/health/liveness" - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 dataDir: /var/pinot/server/data/index segmentTarDir: /var/pinot/server/data/segment @@ -432,23 +150,15 @@ server: persistence: enabled: true accessMode: ReadWriteOnce - size: 4G + size: 5G mountPath: /var/pinot/server/data - storageClass: "" - #storageClass: "ssd" - extraVolumes: [] - extraVolumeMounts: [] + storageClass: "azure-disk-std-lrs" - jvmOpts: "-Xms512M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-server.log -Djute.maxbuffer=4000000" + jvmOpts: "-Xms512M -Xmx1G" - log4j2ConfFile: /opt/pinot/etc/conf/pinot-server-log4j2.xml + log4j2ConfFile: /opt/pinot/conf/pinot-server-log4j2.xml pluginsDir: /opt/pinot/plugins - pdb: - enabled: false - minAvailable: "" - maxUnavailable: 1 - service: annotations: {} clusterIP: "" @@ -456,21 +166,16 @@ server: loadBalancerIP: "" loadBalancerSourceRanges: [] type: ClusterIP - nettyPort: 8098 - nettyPortName: netty - adminPort: 8097 - adminExposePort: 80 - adminPortName: admin + port: 8098 nodePort: "" - protocol: TCP - extraPorts: [] - # - containerPort: 1234 - # protocol: PROTOCOL - # name: extra-port resources: requests: - memory: "1.25Gi" + cpu: 200m + memory: 512Mi + limits: + cpu: 500m + memory: 1Gi 
nodeSelector: {} @@ -478,320 +183,46 @@ server: tolerations: [] - initContainers: [] - podAnnotations: {} - # set enabled as true, to automatically roll server stateful set for configmap change - automaticReload: - enabled: false - updateStrategy: type: RollingUpdate - # Use envFrom to define all of the ConfigMap or Secret data as container environment variables. - # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables - # ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables - envFrom: [] - # - configMapRef: - # name: special-config - # - secretRef: - # name: test-secret - - # Use extraEnv to add individual key value pairs as container environment variables. - # ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ - extraEnv: - - name: LOG4J_CONSOLE_LEVEL - value: info - # - name: PINOT_CUSTOM_ENV - # value: custom-value - - # Extra configs will be appended to pinot-server.conf file - extra: - configs: |- - pinot.set.instance.id.to.hostname=true - pinot.server.instance.realtime.alloc.offheap=true - pinot.query.server.port=7321 - pinot.query.runner.port=7732 - -# ------------------------------------------------------------------------------ -# Pinot Minion: -# ------------------------------------------------------------------------------ -minion: - enabled: false - name: minion - # Controls whether pinot.minion.port is included in the configuration. - # Set to false to exclude pinot.minion.port when using TLS-only mode - # or when you're configuring ports through another mechanism. 
- configureMinionPort: true - replicaCount: 0 - podManagementPolicy: Parallel - podSecurityContext: {} - # fsGroup: 2000 - securityContext: {} - startCommand: "StartMinion" - - probes: - endpoint: "/health" - livenessEnabled: true - readinessEnabled: true - startupEnabled: false - liveness: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - readiness: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - startup: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - dataDir: /var/pinot/minion/data - jvmOpts: "-XX:ActiveProcessorCount=2 -Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-minion.log -Djute.maxbuffer=4000000" - - log4j2ConfFile: /opt/pinot/etc/conf/pinot-minion-log4j2.xml - pluginsDir: /opt/pinot/plugins - - persistence: - enabled: true - accessMode: ReadWriteOnce - size: 4G - mountPath: /var/pinot/minion/data - storageClass: "" - #storageClass: "ssd" - extraVolumes: [] - extraVolumeMounts: [] - - service: - annotations: {} - clusterIP: "" - externalIPs: [] - loadBalancerIP: "" - loadBalancerSourceRanges: [] - type: ClusterIP - port: 9514 - nodePort: "" - protocol: TCP - name: minion - extraPorts: [] - # - containerPort: 1234 - # protocol: PROTOCOL - # name: extra-port - - resources: - requests: - memory: "1.25Gi" - - nodeSelector: {} - - affinity: {} - - tolerations: [] - - initContainers: [] - - podAnnotations: {} - - automaticReload: - enabled: false - - updateStrategy: - type: RollingUpdate - - # Use envFrom to define all of the ConfigMap or Secret data as container environment variables. 
- # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables - # ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables - envFrom: [] - # - configMapRef: - # name: special-config - # - secretRef: - # name: test-secret - - # Use extraEnv to add individual key value pairs as container environment variables. - # ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ - extraEnv: - - name: LOG4J_CONSOLE_LEVEL - value: info - # - name: PINOT_CUSTOM_ENV - # value: custom-value - - # Extra configs will be appended to pinot-minion.conf file - extra: - configs: |- - pinot.set.instance.id.to.hostname=true - - -# ------------------------------------------------------------------------------ -# Pinot Minion Stateless: -# ------------------------------------------------------------------------------ -minionStateless: - enabled: true - name: minion-stateless - # Controls whether pinot.minion.port is included in the configuration. - # Set to false to exclude pinot.minion.port when using TLS-only mode - # or when you're configuring ports through another mechanism. 
- configureMinionStatelessPort: true - replicaCount: 1 - podSecurityContext: {} - # fsGroup: 2000 - securityContext: {} - startCommand: "StartMinion" - - probes: - endpoint: "/health" - livenessEnabled: true - readinessEnabled: true - startupEnabled: true - liveness: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - readiness: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - startup: - initialDelaySeconds: 60 - failureThreshold: 10 - timeoutSeconds: 10 - successThreshold: 1 - periodSeconds: 10 - - dataDir: /var/pinot/minion/data - jvmOpts: "-XX:ActiveProcessorCount=2 -Xms256M -Xmx1G -XX:+UseG1GC -XX:MaxGCPauseMillis=200 -Xlog:gc*:file=/opt/pinot/gc-pinot-minion.log -Djute.maxbuffer=4000000" - - log4j2ConfFile: /opt/pinot/etc/conf/pinot-minion-log4j2.xml - pluginsDir: /opt/pinot/plugins - - persistence: - enabled: false - pvcName: minion-data-vol - accessMode: ReadWriteOnce - size: 4G - mountPath: /var/pinot/minion/data - storageClass: "" - #storageClass: "ssd" - extraVolumes: [] - extraVolumeMounts: [] - - service: - port: 9514 - protocol: TCP - name: minion - extraPorts: [] - # - containerPort: 1234 - # protocol: PROTOCOL - # name: extra-port - - resources: - requests: - memory: "1.25Gi" - - nodeSelector: {} - - affinity: {} - - tolerations: [] - - initContainers: [] - - podAnnotations: {} - - # Use envFrom to define all of the ConfigMap or Secret data as container environment variables. 
- # ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-pod-configmap/#configure-all-key-value-pairs-in-a-configmap-as-container-environment-variables - # ref: https://kubernetes.io/docs/tasks/inject-data-application/distribute-credentials-secure/#configure-all-key-value-pairs-in-a-secret-as-container-environment-variables - envFrom: [] - # - configMapRef: - # name: special-config - # - secretRef: - # name: test-secret - - # Use extraEnv to add individual key value pairs as container environment variables. - # ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ - extraEnv: - - name: LOG4J_CONSOLE_LEVEL - value: info - # - name: PINOT_CUSTOM_ENV - # value: custom-value - - # Extra configs will be appended to pinot-minion.conf file - extra: - configs: |- - pinot.set.instance.id.to.hostname=true - # ------------------------------------------------------------------------------ # Zookeeper: -# NOTE: IN PRODUCTION USE CASES, IT's BEST TO USE ZOOKEEPER K8S OPERATOR -# ref: https://github.com/pravega/zookeeper-operator#install-the-operator # ------------------------------------------------------------------------------ zookeeper: ## If true, install the Zookeeper chart alongside Pinot - ## ref: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper - enabled: true - - ## If the Zookeeper Chart is disabled a URL override is required to connect - urlOverride: "my-zookeeper:2181/my-pinot" - - ## Zookeeper port - port: 2181 + ## ref: https://github.com/kubernetes/charts/tree/master/incubator/zookeeper + enabled: false ## Configure Zookeeper resource requests and limits ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ - resources: - requests: - memory: "1.25Gi" + resources: {} ## Replicas replicaCount: 1 - ## Ongoing data directory cleanup configuration - autopurge: - - ## The time interval (in hours) for which the purge task has to be triggered - ## Set to a positive integer to enable 
the auto purging - purgeInterval: 1 - - ## The most recent snapshots amount (and corresponding transaction logs) to retain - snapRetainCount: 5 - - ## Size (in MB) for the Java Heap options (Xmx and Xms) - ## This env var is ignored if Xmx an Xms are configured via `zookeeper.jvmFlags` - heapSize: "1024" - - ## Extra JVM Flags for Zookeeper - jvmFlags: "-Djute.maxbuffer=4000000" + ## Environmental variables to set in Zookeeper + env: + ## The JVM heap size to allocate to Zookeeper + ZK_HEAP_SIZE: "256M" persistence: enabled: true - storageClass: "" - #storageClass: "ssd" - ## The amount of PV storage allocated to each Zookeeper pod in the statefulset - size: "8Gi" + # size: "2Gi" ## Specify a Zookeeper imagePullPolicy ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images image: PullPolicy: "IfNotPresent" + ## If the Zookeeper Chart is disabled a URL and port are required to connect + url: "zookeeper-headless.freeleaps-data-platform.svc.freeleaps.cluster" + port: 2181 + ## Pod scheduling preferences (by default keep pods within a release on separate nodes). 
## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity ## By default we don't set affinity: diff --git a/cluster/manifests/freeleaps-data-platform/pinot/vpa.yaml b/cluster/manifests/freeleaps-data-platform/pinot/vpa.yaml new file mode 100644 index 00000000..b03c0669 --- /dev/null +++ b/cluster/manifests/freeleaps-data-platform/pinot/vpa.yaml @@ -0,0 +1,87 @@ +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: pinot-controller-vpa + namespace: freeleaps-data-platform +spec: + resourcePolicy: + containerPolicies: + - containerName: '*' + controlledResources: + - cpu + - memory + maxAllowed: + cpu: 500m + memory: 1Gi + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: pinot-controller + updatePolicy: + updateMode: "Auto" +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: pinot-broker-vpa + namespace: freeleaps-data-platform +spec: + resourcePolicy: + containerPolicies: + - containerName: '*' + controlledResources: + - cpu + - memory + maxAllowed: + cpu: 500m + memory: 1.5Gi + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: pinot-broker + updatePolicy: + updateMode: "Auto" +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: pinot-server-vpa + namespace: freeleaps-data-platform +spec: + resourcePolicy: + containerPolicies: + - containerName: '*' + controlledResources: + - cpu + - memory + maxAllowed: + cpu: 500m + memory: 1Gi + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: pinot-server + updatePolicy: + updateMode: "Auto" +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: pinot-minion-stateless-vpa + namespace: freeleaps-data-platform +spec: + resourcePolicy: + containerPolicies: + - containerName: '*' + controlledResources: + - cpu + - memory + maxAllowed: + cpu: 500m + memory: 1Gi + targetRef: + apiVersion: apps/v1 + kind: Deployment + name: 
pinot-minion-stateless + updatePolicy: + updateMode: "Auto" \ No newline at end of file diff --git a/cluster/manifests/freeleaps-data-platform/star-rocks-operator/values.yaml b/cluster/manifests/freeleaps-data-platform/star-rocks-operator/values.yaml new file mode 100644 index 00000000..498dad47 --- /dev/null +++ b/cluster/manifests/freeleaps-data-platform/star-rocks-operator/values.yaml @@ -0,0 +1,108 @@ +# Default values for operator. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +global: + rbac: + # if set true, the clusterrole, clusterrolebinding, serviceaccount resources will be created for + # operator. If changed to false later, these resources will be deleted. + # Note: By default the operator will watch all namespaces, so it needs clusterrole, clusterrolebinding to access resources. + # If .Values.starrocksOperator.watchNamespace is set, the role and rolebinding will be created for the specified namespace. + create: true + serviceAccount: + name: "starrocks" + # Optional annotations to add to serviceaccount manifest + annotations: {} + # Optional labels to add to serviceaccount manifest + labels: {} + +# TimeZone is used to set the environment variable TZ for pod, with Asia/Shanghai as the default. +timeZone: Asia/Shanghai + +# set the nameOverride values for creating the same resources with parent chart. +# In version v1.7.1 or before, there is only one chart called kube-starrocks, and the chart name is the prefix +# of some resources created by the chart. +# In version v1.8.0, the kube-starrocks chart is split into two charts, and to keep backward compatibility, the +# nameOverride is used to set the prefix of the resources created by operator chart. +nameOverride: "kube-starrocks" + +starrocksOperator: + # If enabled, the operator releated resources will be created, including the operator deployment, service account, + # clusterrole, clusterrolebinding, and service account. 
+ enabled: true + # annotations for starrocks operator. + annotations: {} + namespaceOverride: "" + image: + # image sliced by "repository:tag" + repository: starrocks/operator + tag: v1.10.2 + imagePullPolicy: Always + replicaCount: 1 + resources: + limits: + cpu: 500m + memory: 800Mi + requests: + cpu: 500m + memory: 400Mi + # By default, the operator will only set runAsNonRoot to true, allowPrivilegeEscalation to false, readOnlyRootFilesystem to true. + # You can customize the securityContext for operator pod, e.g. drop capabilities, seccompProfile, etc. + securityContext: + runAsNonRoot: true + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + #capabilities: + # drop: + # - ALL + #seccompProfile: + # type: "RuntimeDefault" + # imagePullSecrets allows you to use secrets to pull images for pods. + imagePullSecrets: [] + # - name: "image-pull-secret" + # If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes" + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # affinity for operator pod scheduling. + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchFields: + # - key: metadata.name + # operator: In + # values: + # - target-host-name + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # If specified, it will restrict operator to watch resources in the specified namespace. And + # 1. You must deploy your StarRocks cluster to the specified namespace. + # 2. You can not assign multiple namespaces to watchNamespace field. + # Note: In most cases, you should not set this value. If your kubernetes cluster manages too many nodes, and + # operator watching all namespaces use too many memory resources, you can set this value. + # Defaults to all namespaces. 
+ watchNamespace: "freeleaps-data-platform" + # Additional operator container environment variables + # You specify this manually like you would a raw deployment manifest. + # Ref: https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ + # operator supports the following environment variables: + # KUBE_STARROCKS_UNSUPPORTED_ENVS: "XXX,YYY" # The environment variables that will not be passed to starrocks container. + env: [] + # setting log related parameter + log: + - --zap-time-encoding=iso8601 + - --zap-encoder=console + # if you want open debug log, open this option + # - --zap-log-level 4 + # Operator need to specify the FQDN in nginx.conf when it set up fe-proxy service. + # By default, Operator will use cluster.local as the dnsDomainSuffix. + # If you set up a kubernetes cluster with a different dnsDomainSuffix, you need to set this value. + dnsDomainSuffix: "" + # By default, the volume name of secret and configmap created by operator for the FE/BE/CN pods has a hash suffix. + # If users plan to use a sidecar or init container to mount the same volume, it will be difficult to get the volume name. + # In this situation, you can set this value to false. + volumeNameWithHash: true + diff --git a/cluster/manifests/freeleaps-data-platform/star-rocks/values.yaml b/cluster/manifests/freeleaps-data-platform/star-rocks/values.yaml index b46fef1c..7704c8c4 100644 --- a/cluster/manifests/freeleaps-data-platform/star-rocks/values.yaml +++ b/cluster/manifests/freeleaps-data-platform/star-rocks/values.yaml @@ -1,32 +1,1269 @@ -starrocks: - initPassword: - enabled: true - # 设置密码 secret,例如: - # kubectl create secret generic starrocks-root-pass --from-literal=password='g()()dpa$$word' - passwordSecret: starrocks-root-pass +# set the nameOverride values for creating the same resources with parent chart. 
+# In version v1.7.1 or before, there is only one chart called kube-starrocks, and the chart name is the prefix +# of some resources created by the chart. +# In version v1.8.0, the kube-starrocks chart is split into two charts, and to keep backward compatibility, the +# nameOverride is used to set the prefix of the resources created by starrocks chart. +nameOverride: "" - starrocksFESpec: - replicas: 3 - service: - type: LoadBalancer - resources: - requests: - cpu: 1 - memory: 1Gi - storageSpec: - name: fe +# This configuration is used to modify the root password during initial deployment. +# After deployment is completed, it won't take effect to modify the password here and to do a `helm upgrade`. +# It also supports providing secret name that contains password, using the password in the secret instead of the plaintext in the values.yaml. +# When both password and passwordSecret are set, only passwordSecret takes effect. +# Note: If you install StarRocks using helm install without setting the initPassword, then for subsequent upgrade deployments, +# you should also not set the initPassword. +# If you install StarRocks using helm install and set the initPassword, please always retain the configuration of initPassword. +initPassword: + enabled: false + # Note: If you are using Argo CD to deploy the StarRocks cluster, you must set isInstall to false after the first installation. + # This is because Argo CD support helm like this: helm template | kubectl apply -f -. If isInstall is true, the + # initPassword job will be executed every time you run the command. + # see https://github.com/argoproj/argo-cd/discussions/7496#discussioncomment-1609267 for more information + isInstall: true + password: "" + # The secret name that contains password, the key of the secret is "password", and you should create it first. + passwordSecret: "" + # The image of the initPassword job, if it is not set, the FE image will be used. 
+ # see https://github.com/StarRocks/starrocks-kubernetes-operator/issues/453 for why we need to set the image. + image: "" + # The annotations for the Job, not including the annotations for the pod. + annotations: {} + # The annotations for the Job's Pod, not including the annotations for the job. + podAnnotations: {} + # resources for init_job pod. + resources: {} + #resources: + # requests: + # cpu: 500m + # memory: 400Mi + # limits: + # cpu: 500m + # memory: 800Mi - starrocksBeSpec: - replicas: 3 - resources: - requests: - cpu: 1 - memory: 2Gi - storageSpec: - name: be - storageSize: 15Gi +# TimeZone is used to set the environment variable TZ for pod, with Asia/Shanghai as the default. +timeZone: UTC - starrocksFeProxySpec: - enabled: true - service: - type: LoadBalancer \ No newline at end of file +# This configuration is used to integrate with external system DataDog. +# You can enable the integration by setting the enabled to true, e.g. datalog.log.enabled=true will enable datadog agent +# to collect the log. +datadog: + log: + enabled: false + # besides the attributes you added, chart will append "source" and "service" attributes to the log config. + # see https://docs.datadoghq.com/containers/kubernetes/log/?tab=operator for more details. + logConfig: '{}' # e.g. '{"app": "starrocks", "tags": ["aa", "bb"]}' + metrics: + enabled: false + profiling: + fe: false # change to 'true' to enable profiling on FE pods; + be: false # change to 'true' to enable profiling on BE pods; + cn: false # change to 'true' to enable profiling on CN pods; + env: "starrocks-default" # the default value for DD_ENV; + configMode: "service" # see https://docs.datadoghq.com/containers/cluster_agent/admission_controller/?tab=operator#configure-apm-and-dogstatsd-communication-mode + +# This configuration is used to integrate with external system Prometheus. +metrics: + serviceMonitor: + # Whether to expose metrics to Prometheus by ServiceMonitor. 
+ # Note: make sure the prometheus operator is installed in your cluster. + # If prometheus is not installed by operator, you can add annotations on k8s service to expose metrics. + # see https://github.com/StarRocks/starrocks-kubernetes-operator/blob/main/doc/integration/integration-prometheus-grafana.md#51-turn-on-the-prometheus-metrics-scrape-by-adding-annotations for more details. + enabled: false + # Prometheus ServiceMonitor labels + labels: {} + # scraper: prometheus-operator + # Prometheus ServiceMonitor interval + interval: 15s + # Whether to enable basic auth + basicAuth: + enabled: false + # The name of the secret that contains the username for basic auth. + # The secret should contain a key named "username". + usernameSecretName: "" + # The key in the secret that contains the username for basic auth. + usernameSecretKey: "" + # The name of the secret that contains the password for basic auth. + # The secret should contain a key named "password". + passwordSecretName: "" + # The key in the secret that contains the password for basic auth. + passwordSecretKey: "" + # Whether to enable request parameters for the ServiceMonitor. + endpointParam: + enabled: false + # the parameters for the ServiceMonitor. + # params: + # with_materialized_view_metrics: + # - "all" + params: {} + +# deploy a starrocks cluster +starrocksCluster: + # the name of starrockscluster cluster, if not set, the value of nameOverride fields will be used. + name: "freeleaps-starrocks" + # the namespace of starrockscluster cluster, if not set, the release namespace will be used. + namespace: "freeleaps-data-platform" + # annotations for starrocks cluster. + annotations: {} + # specify the cn deploy or not. + enabledBe: true + enabledCn: false + # disaster recovery configuration. If you want to enable disaster recovery, you need to set the enabled field to true. + # Note: + # 1. 
If you are using an existing StarRocks cluster, you need to clean up the meta of the FE component and the data of the CN + # component before enabling disaster recovery. So it is better to use an empty StarRocks cluster to start disaster recovery. + # 2. After disaster recovery, Operator will reboot the cluster as a normal cluster automatically, so if you need more checks by yourself, + # you can + # 1. set the replicas of FE component to 1 + # 2. set enabledBe and enabledCn to be false to disable to deploy BE and CN components. + # 3. generation field is used to run multiple times for disaster recovery. For example, if the last disaster recovery is + # not what you want, you can modify related configurations and increase the generation value to run a new disaster recovery. + disasterRecovery: + # enabled: true + # generation: 1 + # componentValues field is used to define values for all starrocks cluster components, including starrocksFESpec, + # starrocksBeSpec, starrocksCnSpec, not including starrocksFeProxySpec. So that you do not need to modify them in + # their own spec. + # Note: + # 1. the values in their own spec will take precedence over the values in this field. + # 2. the values in their own spec will replace all the values in this field, not merge. + componentValues: + image: + tag: "3.3-latest" + # hostAliases allows adding entries to /etc/hosts inside the containers. + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + # If runAsNonRoot is true, the container is run as non-root user. + # The userId will be set to 1000, and the groupID will be set to 1000. + # Note: If you have started the container through root, and then FE/BE began to create directories, write files, etc. + # under the mounted directory as root. When you start the container as a non-root user, the container will not + # have permission to access these files. So you'd better set runAsNonRoot to true when you set up the cluster. 
+ # schedulerName allows you to specify which scheduler will be used for your pods. + schedulerName: "" + # serviceAccount for access cloud service. + serviceAccount: "" + # imagePullSecrets allows you to use secrets to pull images for pods. + imagePullSecrets: [] + # - name: "image-pull-secret" + # tolerations for pod scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # topologySpreadConstraints for scheduling pods across failure-domains. + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule|ScheduleAnyway + # labelSelector: + # matchLabels: + # foo: bar + # If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes" + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # kubernetes.io/arch: amd64 + # kubernetes.io/os: linux + # affinity for pod scheduling. + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchFields: + # - key: metadata.name + # operator: In + # values: + # - target-host-name + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/component + # operator: In + # values: + # - fe + # - be + # - cn + # topologyKey: "kubernetes.io/hostname" + # the pod labels for user select or classify pods. + podLabels: {} + +# spec to deploy fe. +starrocksFESpec: + # number of replicas to deploy for a fe statefulset. 
+ replicas: 1 + image: + # image sliced by "repository:tag" + repository: starrocks/fe-ubuntu + tag: "" + imagePullPolicy: IfNotPresent + # Specify the entrypoint for FE. + # By default, operator will use '/opt/starrocks/fe_entrypoint.sh' as command, and use '$(FE_SERVICE_NAME)' as args in container spec. + # If entrypoint is set, the command will be ["bash", "-c"], and the args will be filename of the entrypoint script. + # A configmap with name $cluster-fe-entrypoint-script will be created, and the script will be mounted to /etc/starrocks/entrypoint.sh + # Pod will be restarted if the entrypoint script is updated. + entrypoint: {} + # script: | + # #! /bin/bash + # echo "do something before start fe" + # exec /opt/starrocks/fe_entrypoint.sh $FE_SERVICE_NAME + # add annotations for fe pods. For example, if you want to config monitor for datadog, you can config the annotations. + annotations: {} + # If runAsNonRoot is true, the container is run as non-root user. + # The userId will be set to 1000, and the groupID will be set to 1000. + # Note: If you have started the container through root, and then FE/BE began to create directories, write files, etc. + # under the mounted directory as root. When you start the container as a non-root user, the container will not + # have permission to access these files. So you'd better set runAsNonRoot to true when you set up the cluster. + runAsNonRoot: false + # Whether this container has a read-only root filesystem. + # Note: The FE/BE/CN container should support read-only root filesystem. The newest version of FE/BE/CN is 3.3.6, and does not support read-only root filesystem. + readOnlyRootFilesystem: false + # add/drop capabilities for FE container. + capabilities: {} + # add: + # - PERFMON + # - SYS_PTRACE + # drop: + # - SYS_ADMIN + # set sysctls for fe pod. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for how to set sysctls. 
+ # Note: The use of unsafe sysctls is at-your-own-risk and can lead to severe problems + sysctls: [] + # - name: net.ipv4.ip_unprivileged_port_start + # value: "2048" + # specify the service name and port config and serviceType + # the service type refer https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + service: + # the fe service type, only supported ClusterIP, NodePort, LoadBalancer + type: "ClusterIP" + # the loadBalancerIP for static ip config when the type=LoadBalancer and loadbalancerIp is not empty. + loadbalancerIP: "" + # add annotations for external fe service. + annotations: {} + # Add labels for external fe service. The operator may add its own default labels. + labels: {} + # config the service port for fe service. + # To assign a specific port or nodePort to a service, you should specify them by the corresponding name or + # containerPort in the service configuration. If both containerPort and name are specified, containerPort takes precedence. + # For fe, port name can be http, query, rpc, edit-log, and their default container port is 8030, 9030, 9020, 9010. + ports: [] + # e.g. specify a dedicated node port for fe service by containerPort. + # - nodePort: 30030 # The range of valid ports is 30000-32767 + # containerPort: 8030 # The port exported on the container + # specify the source IP ranges for the load balancer when the type=LoadBalancer. + loadBalancerSourceRanges: [] + # - 10.0.0.0/8 + # imagePullSecrets allows you to use secrets to pull images for pods. + imagePullSecrets: [] + # - name: "image-pull-secret" + # serviceAccount for fe access cloud service. 
+ serviceAccount: "" + # If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes" + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # kubernetes.io/arch: amd64 + # kubernetes.io/os: linux + # the pod labels for user select or classify pods. + podLabels: {} + # hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + # schedulerName allows you to specify which scheduler will be used for your pods. + schedulerName: "" + # Additional fe container environment variables. + # See https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ for how to define environment variables. + # Note: If you use slice to define environment variables, and if there are multiple values files, the values in the last values file will take effect. + # If you use map to define environment variables, the values in the values files will be merged. + # You can only use one of slice and map to define environment variables. + # In order to avoid different type of feEnvVars, we do not define the default value of feEnvVars, e.g. feEnvVars: [] or feEnvVars: {}. + #feEnvVars: + # define environment variables by slice. + # e.g. static environment variable: + # - name: DEMO_GREETING + # value: "Hello from the environment" + # e.g. secret environment variable: + # - name: USERNAME + # valueFrom: + # secretKeyRef: + # name: mysecret + # key: username + # affinity for fe pod scheduling. + # Note: It will affect the scheduling of the init-password job. 
+ affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchFields: + # - key: metadata.name + # operator: In + # values: + # - target-host-name + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/component + # operator: In + # values: + # - fe + # topologyKey: "kubernetes.io/hostname" + # Node tolerations for fe pod scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + # Note: It will affect the scheduling of the init-password job. + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # topologySpreadConstraints for scheduling pods across failure-domains. + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule|ScheduleAnyway + # labelSelector: + # matchLabels: + # foo: bar + # resources for fe pod. + resources: + requests: + cpu: 500m + memory: 1Gi + # If you want to remove one resource limit, e.g. cpu, you can set it to cpu: "unlimited". + limits: + cpu: 1000m + memory: 2Gi + # fe storageSpec for persistent metadata. + # Note: Once set, the following fields will not be allowed to be modified. + storageSpec: + # Specifies the name prefix of the volumes to mount. If left unspecified, + # `emptyDir` volumes will be used by default, which are ephemeral and data + # will be lost on pod restart. + # + # For persistent storage, specify a volume name prefix. + # For example, using `fe` as the name prefix would be appropriate. + # Note: If the values of the following mountPaths are not default, you must set the name to fe. + name: "" + # The storageClassName represent the used storageclass name. 
if not set will use k8s cluster default storageclass. + # You must set name when you set storageClassName. + # Note: Because hostPath field is not supported here, hostPath is not allowed to be set in storageClassName. + storageClassName: "azure-disk-std-lrs" + # the persistent volume size for data. + # fe container stop running if the disk free space which the fe meta directory residents, is less than 5Gi. + storageSize: 10Gi + # If storageMountPath is empty, the storageMountPath will be set to /opt/starrocks/fe/meta. + storageMountPath: "" + # If not set will use the value of the storageClassName field. + logStorageClassName: "azure-disk-std-lrs" + # Setting this parameter can persist log storage, and the mount path is /opt/starrocks/fe/log. + # If you set it to 0Gi, the related PVC will not be created, and the log will not be persisted. + logStorageSize: 5Gi + # If logMountPath is empty, the logMountPath will be set to /opt/starrocks/fe/log. + # If logMountPath is not /opt/starrocks/fe/log, you must add in config the following configuration: + # dump_log_dir = xxx + # sys_log_dir = xxx + # audit_log_dir = xxx + logMountPath: "" + # mount emptyDir volumes if necessary. + # Note: please use storageSpec field for persistent metadata and log. + emptyDirs: [] + # e.g. mount an emptyDir volume to /tmp + # - name: tmp-data + # mountPath: /tmp + # mount hostPath volumes if necessary. + # Note: please use storageSpec field for persistent storage data and log. + hostPaths: [] + # e.g. mount a hostPath volume to /tmp + # - name: tmp-data + # hostPath: + # path: /tmp + # type: Directory + # mountPath: /tmp + # the config for start fe. the base information as follows. 
+  config: |
+    LOG_DIR = ${STARROCKS_HOME}/log
+    DATE = "$(date +%Y%m%d-%H%M%S)"
+    # NOTE(review): JVM heap must fit within starrocksFESpec.resources.limits.memory (2Gi above);
+    # the upstream default -Xmx8192m would exceed the limit and get the FE pod OOMKilled.
+    JAVA_OPTS="-Dlog4j2.formatMsgNoLookups=true -Xmx1536m -XX:+UseG1GC -Xlog:gc*:${LOG_DIR}/fe.gc.log.$DATE:time"
+    http_port = 8030
+    rpc_port = 9020
+    query_port = 9030
+    edit_log_port = 9010
+    mysql_service_nio_enabled = true
+    sys_log_level = INFO
+  # A map object for setting the config. When configyaml is set to non-empty, the configs in configyaml will take
+  # precedence and values in config field will be discarded.
+  # Note: When using configyaml, the number needs to be quoted to avoid being converted to scientific notation.
+  # e.g. brpc_socket_max_unwritten_bytes: "10737418240"
+  configyaml: {}
+  # mount secrets if necessary.
+  # see https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath for more details about subPath.
+  secrets: []
+  # e.g. mount my-secret to /etc/my-secret
+  # - name: my-secret
+  #   mountPath: /etc/my-secret
+  #   subPath: ""
+  # mount configmaps if necessary.
+  # see https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath for more details about subPath.
+  configMaps: []
+  # e.g. mount my-configmap to /etc/my-configmap
+  # - name: my-configmap
+  #   mountPath: /etc/my-configmap
+  #   subPath: ""
+  # terminationGracePeriodSeconds defines duration in seconds the FE pod needs to terminate gracefully.
+  # default value is 120 seconds
+  terminationGracePeriodSeconds: 120
+
+  # Please upgrade the CRD with v1.8.7 released version, if you want to use the following configuration.
+  # including: startupProbeFailureSeconds, livenessProbeFailureSeconds, readinessProbeFailureSeconds
+
+  # StartupProbeFailureSeconds defines the total failure seconds of startup Probe.
+  # default value is 300 seconds
+  # You can set it to "0" to disable the probe.
+  startupProbeFailureSeconds:
+  # LivenessProbeFailureSeconds defines the total failure seconds of liveness Probe.
+  # default value is 15 seconds
+  # You can set it to "0" to disable the probe.
+ livenessProbeFailureSeconds: + # ReadinessProbeFailureSeconds defines the total failure seconds of readiness Probe. + # default value is 15 seconds + # You can set it to "0" to disable the probe. + readinessProbeFailureSeconds: + # Lifecycle describes actions that the management system should take in response to container lifecycle events. + # By default, Operator will add corresponding preStop hooks for different components. For example, the preStop + # script for the FE Component is /opt/starrocks/fe_prestop.sh, for the BE Component is /opt/starrocks/be_prestop.sh, + # and for the CN Component is /opt/starrocks/cn_prestop.sh. + # You can just set postStart hook. + lifecycle: {} + # postStart: + # exec: + # command: + # - /bin/sh + # - -c + # - echo "Hello, world!" + # Sidecars is an optional list of containers that are run in the same pod as the starrocks component. + # You can use this field to launch helper containers that provide additional functionality to the main container. + # See https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container for how to define sidecars. + sidecars: [] + # - name: sidecar-container + # image: busybox + # # If starrocksFESpec.storageSpec.name is fe or not set, you can mount the volume of meta like this. + # command: ['sh', '-c', 'echo "hello from the sidecar container" >/opt/starrocks/fe/meta/sidecar-data.txt && sleep 3600'] + # volumeMounts: + # - mountPath: /opt/starrocks/fe/meta + # name: fe-meta # append -meta to the end of the name of the starrocksFESpec.storageSpec.name + # sidecarsMap is an optional map of containers that are run in the same pod as the starrocks component. + # The reason for using sidecarsMap please refer to https://github.com/StarRocks/starrocks-kubernetes-operator/issues/618 + # sidecarsMap has higher priority than sidecars, and the key of sidecarsMap is the name of the sidecar container. 
+ sidecarsMap: {} + # sidecar-container: + # image: busybox + # # If starrocksFESpec.storageSpec.name is fe or not set, you can mount the volume of meta like this. + # command: ['sh', '-c', 'echo "hello from the sidecar container" >/opt/starrocks/fe/meta/sidecar-data.txt && sleep 3600'] + # volumeMounts: + # - mountPath: /opt/starrocks/fe/meta + # name: fe-meta # append -meta to the end of the name of the starrocksFESpec.storageSpec.name + # initContainers is an optional list of containers that are run in the same pod as the starrocks component. + # You can use this to launch helper containers that run before the main container starts. + # See https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container for how to configure a container. + initContainers: [] + # - name: init-container + # image: busybox + # # If starrocksFESpec.storageSpec.name is fe or not set, you can mount the volume of meta like this. + # command: ['sh', '-c', 'echo "hello from the init container" >/opt/starrocks/fe/meta/init-data.txt'] + # volumeMounts: + # - mountPath: /opt/starrocks/fe/meta + # name: fe-meta # append -meta to the end of the name of the starrocksFESpec.storageSpec.name + # Max unavailable pods for the fe component when doing rolling update. + # This field cannot be 0. The default setting is 1. + # Note: Because Operator uses statefulset to manage this component, the maxUnavailable field is in Alpha stage, and it is honored + # only by API servers that are running with the MaxUnavailableStatefulSet feature gate enabled. + # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#maximum-unavailable-pods for more details. + maxUnavailablePods: + # Share a single process namespace between all of the containers in a pod. + # When this is set containers will be able to view and signal processes from other containers + # in the same pod, and the first process in each container will not be assigned PID 1. 
+ shareProcessNamespace: + +# spec for compute node, compute node provide compute function. +starrocksCnSpec: + # number of replicas to deploy for CN component. + # + # When autoscaling of the CN statefulset is used the `replicas` field is set to null + # by the operator, and autoscaling is controlled by Horizontal Pod Autoscaling (HPA). + # You should only uncomment and set the `replicas` field if autoscaling is disabled. + # replicas: 1 + image: + # image sliced by "repository:tag" + repository: starrocks/cn-ubuntu + tag: "" + imagePullPolicy: IfNotPresent + # Specify the entrypoint for CN. + # By default, operator will use '/opt/starrocks/cn_entrypoint.sh' as command, and use '$(FE_SERVICE_NAME)' as args in container spec. + # If entrypoint is set, the command will be ["bash", "-c"], and the args will be filename of the entrypoint script. + # A configmap with name $cluster-cn-entrypoint-script will be created, and the script will be mounted to /etc/starrocks/entrypoint.sh + # Pod will be restarted if the entrypoint script is updated. + entrypoint: {} + # script: | + # #! /bin/bash + # echo "do something before start cn" + # exec /opt/starrocks/cn_entrypoint.sh $FE_SERVICE_NAME + # serviceAccount for cn access cloud service. + serviceAccount: "" + # add annotations for cn pods. example, if you want to config monitor for datadog, you can config the annotations. + annotations: {} + # If runAsNonRoot is true, the container is run as non-root user. + # The userId will be set to 1000, and the groupID will be set to 1000. + # Note: If you have started the container through root, and then FE/BE began to create directories, write files, etc. + # under the mounted directory as root. When you start the container as a non-root user, the container will not + # have permission to access these files. So you'd better set runAsNonRoot to true when you set up the cluster. + runAsNonRoot: false + # Whether this container has a read-only root filesystem. 
+ # Note: The FE/BE/CN container should support read-only root filesystem. The newest version of FE/BE/CN is 3.3.6, and does not support read-only root filesystem. + readOnlyRootFilesystem: false + # add/drop capabilities for CN container. + capabilities: {} + # add: + # - PERFMON + # - SYS_PTRACE + # drop: + # - SYS_ADMIN + # set sysctls for cn pod. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for how to set sysctls. + # Note: The use of unsafe sysctls is at-your-own-risk and can lead to severe problems + sysctls: [] + # - name: net.ipv4.ip_unprivileged_port_start + # value: "2048" + # specify the service name and port config and serviceType + # the service type refer https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + service: + # the cn service type, only supported ClusterIP, NodePort, LoadBalancer + type: "ClusterIP" + # the loadBalancerIP for static ip config when the type=LoadBalancer and loadBalancerIp is not empty. + loadbalancerIP: "" + # add annotations for external cn service. + annotations: {} + # Add labels for external cn service. The operator may add its own default labels. + labels: {} + # config the service port for cn service. + # To assign a specific port or nodePort to a service, you should specify them by the corresponding name or + # containerPort in the service configuration. If both containerPort and name are specified, containerPort takes precedence. + # For cn, port name can be webserver, heartbeat, brpc, thrift, and their default container port is 8040, 9050, 8060, 9060. + ports: [] + # e.g. specify a dedicated node port for cn service by containerPort. + # - nodePort: 30040 # The range of valid ports is 30000-32767 + # containerPort: 8040 # The port on the container to expose + # specify the source IP ranges for the load balancer when the type=LoadBalancer. 
+ loadBalancerSourceRanges: [] + # - 10.0.0.0/8 + # imagePullSecrets allows you to use secrets for pulling images for your pods. + imagePullSecrets: [] + # - name: "image-pull-secret" + # If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes" + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # kubernetes.io/arch: amd64 + # kubernetes.io/os: linux + # the pod labels for user select or classify pods. + podLabels: {} + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + # schedulerName allows you to specify which scheduler will be used for the pod + schedulerName: "" + # Additional cn container environment variables. + # See https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ for how to define environment variables. + # Note: If you use slice to define environment variables, and if there are multiple values files, the values in the last values file will take effect. + # If you use map to define environment variables, the values in the values files will be merged. + # You can only use one of slice and map to define environment variables. + # In order to avoid different type of cnEnvVars, we do not define the default value of cnEnvVars, e.g. cnEnvVars: [] or cnEnvVars: {}. + # cnEnvVars: + # define environment variables by slice. + # e.g. static environment variable: + # - name: DEMO_GREETING + # value: "Hello from the environment" + # e.g. secret environment variable: + # - name: USERNAME + # valueFrom: + # secretKeyRef: + # name: mysecret + # key: username + # affinity for cn pod scheduling. 
+ affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchFields: + # - key: metadata.name + # operator: In + # values: + # - target-host-name + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/component + # operator: In + # values: + # - cn + # topologyKey: "kubernetes.io/hostname" + # Node tolerations for cn pod scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # topologySpreadConstraints for scheduling pods across failure-domains. + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule|ScheduleAnyway + # labelSelector: + # matchLabels: + # foo: bar + autoScalingPolicy: {} + # you can select different versions of HPA (Horizontal Pod Autoscaler) based on the Kubernetes version you are + # using to ensure compatibility and adaptability. the default version is v2beta2. + # version: v2beta2 + # maxReplicas: 10 + # minReplicas: 1 + # hpaPolicy: + # metrics: + # - type: Resource + # resource: + # name: memory + # target: + # averageUtilization: 30 + # type: Utilization + # - type: Resource + # resource: + # name: cpu + # target: + # averageUtilization: 30 + # type: Utilization + # behavior: + # scaleUp: + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 10 + # scaleDown: + # selectPolicy: Disabled + # define resources requests and limits for cn pods. + resources: + requests: + cpu: 4 + memory: 8Gi + # If you want to remove one resource limit, e.g. cpu, you can set it to cpu: "unlimited". 
+ limits: + cpu: 8 + memory: 8Gi + # specify storageclass name and request size. + # Note: Once set, the following fields will not be allowed to be modified. + storageSpec: + # Specifies the name prefix of the volumes to mount. If left unspecified, + # `emptyDir` volumes will be used, which are ephemeral, and only for log. + # The logs will be lost on pod restart when using emptyDir volumes. + # + # For persistent storage, specify a volume name prefix. + # For example, using `cn` as the name prefix would be appropriate. + # Note: If the values of the following mountPaths are not default, you must set the name to cn. + name: "" + # The storageClassName represent the used storageclass name. if not set will use k8s cluster default storageclass. + # You must set name when you set storageClassName + # Note: Because hostPath field is not supported here, hostPath is not allowed to be set in storageClassName. + storageClassName: "" + # the storage size of per persistent volume for data. + storageSize: 1Ti + # the number of persistent volumes for data. + # if storageCount == 1 + # the storageMountPath field is used to specify the mount path of the persistent volume. If storageMountPath is empty, + # the storageMountPath will be set to /opt/starrocks/cn/storage. + # If storageMountPath is not /opt/starrocks/cn/storage, you must add in config the following configuration: storage_root_path = xxx. + # if storageCount > 1 + # the storageMountPath field is used to specify the prefix of mount path of the persistent volume. For example, if the + # storageMountPath is /opt/starrocks/cn/storage, the real mount path will be /opt/starrocks/cn/storage0, /opt/starrocks/cn/storage1, ... + # You must add in config the following configuration: storage_root_path = /opt/starrocks/cn/storage0;/opt/starrocks/cn/storage1;... + storageCount: 1 + # see the comment of storageCount for the usage of storageMountPath. 
+ storageMountPath: "" + # If not set will use the value of the storageClassName field. + logStorageClassName: "" + # the storage size of persistent volume for log, and the mount path is /opt/starrocks/cn/log. + # If you set it to 0Gi, the related PVC will not be created, and the log will not be persisted. + logStorageSize: 20Gi + # If logMountPath is empty, the logMountPath will be set to /opt/starrocks/cn/log. + # If logMountPath is not /opt/starrocks/cn/log, you must add in config the following configuration: sys_log_dir = xxx. + logMountPath: "" + # If not set will use the value of the storageClassName field. + spillStorageClassName: "" + # Setting this parameter can persist spill storage, and the mount path is /opt/starrocks/cn/spill. + # If you set it to 0Gi, the related PVC will not be created, and the spill will not be persisted. + # You need to add spill_local_storage_dir=/opt/starrocks/cn/spill in cn.conf. + spillStorageSize: 0Gi + # If spillMountPath is empty, the spillMountPath will be set to /opt/starrocks/cn/spill. + # If spillMountPath is not /opt/starrocks/cn/spill, you must add in config the following configuration: spill_local_storage_dir = xxx. + spillMountPath: "" + # mount emptyDir volumes if necessary. + # Note: please use storageSpec field for persistent storage data and log. + emptyDirs: [] + # e.g. mount an emptyDir volume to /tmp + # - name: tmp-data + # mountPath: /tmp + # mount hostPath volumes if necessary. + # Note: + # 1. please use storageSpec field for persistent storage data and log. + # 2. please use podAntiAffinity to avoid the pods are scheduled on the same node. + # 3. If you use hostPath to mount the volume of cache data, cache will be lost when the pod is restarted. + hostPaths: [] + # e.g. mount a hostPath volume to /tmp + # - name: tmp-data + # hostPath: + # path: /tmp + # type: Directory + # mountPath: /tmp + # the config start for cn, the base information as follows. 
+ # From StarRocks 3.1, the official documentation use:
+ # 1. be_port instead of thrift_port, but the thrift_port is still supported.
+ # 2. be_http_port instead of webserver_port, but the webserver_port is still supported.
+ # In order to avoid the impact of the change on the user's deployment, we still use the old configuration.
+ config: |
+ sys_log_level = INFO
+ # ports for admin, web, heartbeat service
+ thrift_port = 9060
+ webserver_port = 8040
+ heartbeat_service_port = 9050
+ brpc_port = 8060
+ # A map object for setting the config. When configyaml is set to non-empty, the configs in configyaml will take
+ # precedence and values in config field will be discarded.
+ # Note: When using configyaml, the number needs to be quoted to avoid being converted to scientific notation.
+ # e.g. brpc_socket_max_unwritten_bytes: "10737418240"
+ configyaml: {}
+ # mount secrets if necessary.
+ # see https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath for more details about subPath.
+ secrets: []
+ # e.g. mount my-secret to /etc/my-secret
+ # - name: my-secret
+ # mountPath: /etc/my-secret
+ # subPath: ""
+ # mount configmaps if necessary.
+ # see https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath for more details about subPath.
+ configMaps: []
+ # e.g. mount my-configmap to /etc/my-configmap
+ # - name: my-configmap
+ # mountPath: /etc/my-configmap
+ # subPath: ""
+ # terminationGracePeriodSeconds defines duration in seconds the CN pod needs to terminate gracefully.
+ # default value is 120 seconds
+ terminationGracePeriodSeconds: 120
+
+ # Please upgrade the CRD with v1.8.7 released version, if you want to use the following configuration.
+ # including: startupProbeFailureSeconds, livenessProbeFailureSeconds, readinessProbeFailureSeconds
+
+ # StartupProbeFailureSeconds defines the total failure seconds of startup Probe.
+ # default value is 300 seconds
+ # You can set it to "0" to disable the probe.
+ startupProbeFailureSeconds: + # LivenessProbeFailureSeconds defines the total failure seconds of liveness Probe. + # default value is 15 seconds + # You can set it to "0" to disable the probe. + livenessProbeFailureSeconds: + # ReadinessProbeFailureSeconds defines the total failure seconds of readiness Probe. + # default value is 15 seconds + # You can set it to "0" to disable the probe. + readinessProbeFailureSeconds: + # Lifecycle describes actions that the management system should take in response to container lifecycle events. + # By default, Operator will add corresponding preStop hooks for different components. For example, the preStop + # script for the FE Component is /opt/starrocks/fe_prestop.sh, for the BE Component is /opt/starrocks/be_prestop.sh, + # and for the CN Component is /opt/starrocks/cn_prestop.sh. + # You can just set postStart hook. + lifecycle: {} + # postStart: + # exec: + # command: + # - /bin/sh + # - -c + # - echo "Hello, world!" + # Sidecars is an optional list of containers that are run in the same pod as the starrocks component. + # You can use this field to launch helper containers that provide additional functionality to the main container. + # See https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container for how to configure a container. + sidecars: [] + # - name: sidecar-container + # image: busybox + # # If starrocksCnSpec.storageSpec.name is cn, you can mount the volume of cache data like this. + # # If starrocksCnSpec.storageSpec.name is not set, no default volume will be created, so you can not mount the volume of cache data like this. 
+ # command: ['sh', '-c', 'echo "hello from the sidecar container" >/opt/starrocks/cn/storage/sidecar-data.txt && sleep 3600'] + # volumeMounts: + # - mountPath: /opt/starrocks/cn/storage + # name: cn-data # append -data to the end of the name of the starrocksCnSpec.storageSpec.name + # sidecarsMap is an optional map of containers that are run in the same pod as the starrocks component. + # The reason for using sidecarsMap please refer to https://github.com/StarRocks/starrocks-kubernetes-operator/issues/618 + # sidecarsMap has higher priority than sidecars, and the key of sidecarsMap is the name of the sidecar container. + sidecarsMap: {} + # sidecar-container: + # image: busybox + # # If starrocksCnSpec.storageSpec.name is cn, you can mount the volume of cache data like this. + # # If starrocksCnSpec.storageSpec.name is not set, no default volume will be created, so you can not mount the volume of cache data like this. + # command: ['sh', '-c', 'echo "hello from the sidecar container" >/opt/starrocks/cn/storage/sidecar-data.txt && sleep 3600'] + # volumeMounts: + # - mountPath: /opt/starrocks/cn/storage + # name: cn-data # append -data to the end of the name of the starrocksCnSpec.storageSpec.name + # initContainers is an optional list of containers that are run in the same pod as the starrocks component. + # You can use this to launch helper containers that run before the main container starts. + # See https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container for how to configure a container. + initContainers: [] + # - name: init-container + # image: busybox + # # If starrocksCnSpec.storageSpec.name is cn, you can mount the volume of cache data like this. + # # If starrocksCnSpec.storageSpec.name is not set, no default volume will be created, so you can not mount the volume of cache data like this. 
+ # command: ['sh', '-c', 'echo "hello from the init container" >/opt/starrocks/cn/storage/init-data.txt'] + # volumeMounts: + # - mountPath: /opt/starrocks/cn/storage + # name: cn-data # append -data to the end of the name of the starrocksCnSpec.storageSpec.name + # Max unavailable pods for the cn component when doing rolling update. + # This field cannot be 0. The default setting is 1. + # Note: Because Operator uses statefulset to manage this component, the maxUnavailable field is in Alpha stage, and it is honored + # only by API servers that are running with the MaxUnavailableStatefulSet feature gate enabled. + # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#maximum-unavailable-pods for more details. + maxUnavailablePods: + # Share a single process namespace between all of the containers in a pod. + # When this is set containers will be able to view and signal processes from other containers + # in the same pod, and the first process in each container will not be assigned PID 1. + shareProcessNamespace: + +# spec for component be, provide storage and compute function. +starrocksBeSpec: + # number of replicas to deploy. + replicas: 1 + image: + # image sliced by "repository:tag" + repository: starrocks/be-ubuntu + tag: "" + imagePullPolicy: IfNotPresent + # Specify the entrypoint for BE. + # By default, operator will use '/opt/starrocks/be_entrypoint.sh' as command, and use '$(FE_SERVICE_NAME)' as args in container spec. + # If entrypoint is set, the command will be ["bash", "-c"], and the args will be filename of the entrypoint script. + # A configmap with name $cluster-be-entrypoint-script will be created, and the script will be mounted to /etc/starrocks/entrypoint.sh + # Pod will be restarted if the entrypoint script is updated. + entrypoint: {} + # script: | + # #! /bin/bash + # echo "do something before start BE" + # exec /opt/starrocks/be_entrypoint.sh $FE_SERVICE_NAME + # serviceAccount for be access cloud service. 
+ serviceAccount: "" + # add annotations for be pods. example, if you want to config monitor for datadog, you can config the annotations. + annotations: {} + # If runAsNonRoot is true, the container is run as non-root user. + # The userId will be set to 1000, and the groupID will be set to 1000. + # Note: If you have started the container through root, and then FE/BE began to create directories, write files, etc. + # under the mounted directory as root. When you start the container as a non-root user, the container will not + # have permission to access these files. So you'd better set runAsNonRoot to true when you set up the cluster. + runAsNonRoot: false + # Whether this container has a read-only root filesystem. + # Note: The FE/BE/CN container should support read-only root filesystem. The newest version of FE/BE/CN is 3.3.6, and does not support read-only root filesystem. + readOnlyRootFilesystem: false + # add/drop capabilities for BE container. + capabilities: {} + # add: + # - PERFMON + # - SYS_PTRACE + # drop: + # - SYS_ADMIN + # set sysctls for be pod. + # See https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/ for how to set sysctls. + # Note: The use of unsafe sysctls is at-your-own-risk and can lead to severe problems + sysctls: [] + # - name: net.ipv4.ip_unprivileged_port_start + # value: "2048" + # specify the service name and port config and serviceType + # the service type refer https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + service: + # the be service type, only supported ClusterIP, NodePort, LoadBalancer + type: "ClusterIP" + # the loadBalancerIP for static ip config when the type=LoadBalancer and loadbalancerIp is not empty. + loadbalancerIP: "" + # add annotations for external be service. + annotations: {} + # Add labels for external be service. The operator may add its own default labels. + labels: {} + # config the service port for be service. 
+ # To assign a specific port or nodePort to a service, you should specify them by the corresponding name or + # containerPort in the service configuration. If both containerPort and name are specified, containerPort takes precedence. + # For be, port name can be webserver, heartbeat, brpc, be, and their default container port is 8040, 9050, 8060, 9060. + ports: [] + # e.g. specify a dedicated node port for be service by containerPort. + # - nodePort: 30040 # The range of valid ports is 30000-32767 + # containerPort: 8040 # The port on the container to expose + # specify the source IP ranges for the load balancer when the type=LoadBalancer. + loadBalancerSourceRanges: [] + # - 10.0.0.0/8 + # imagePullSecrets allows you to use secrets to pull images for pods. + imagePullSecrets: [] + # - name: "image-pull-secret" + # If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes" + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # kubernetes.io/arch: amd64 + # kubernetes.io/os: linux + # the pod labels for user select or classify pods. + podLabels: {} + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + # schedulerName allows you to specify which scheduler will be used for the pod + schedulerName: "" + # Additional be container environment variables. + # See https://kubernetes.io/docs/tasks/inject-data-application/define-environment-variable-container/ for how to define environment variables. + # Note: If you use slice to define environment variables, and if there are multiple values files, the values in the last values file will take effect. + # If you use map to define environment variables, the values in the values files will be merged. + # You can only use one of slice and map to define environment variables. 
+ # In order to avoid different type of beEnvVars, we do not define the default value of beEnvVars, e.g. beEnvVars: [] or beEnvVars: {}. + # beEnvVars: + # define environment variables by slice. + # e.g. static environment variable: + # - name: DEMO_GREETING + # value: "Hello from the environment" + # e.g. secret environment variable: + # - name: USERNAME + # valueFrom: + # secretKeyRef: + # name: mysecret + # key: username + # affinity for fe pod scheduling. + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchFields: + # - key: metadata.name + # operator: In + # values: + # - target-host-name + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/component + # operator: In + # values: + # - be + # topologyKey: "kubernetes.io/hostname" + # Node tolerations for be pod scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + # topologySpreadConstraints for scheduling pods across failure-domains. + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule|ScheduleAnyway + # labelSelector: + # matchLabels: + # foo: bar + # resources for be pod. + resources: + requests: + cpu: 500m + memory: 1Gi + # If you want to remove one resource limit, e.g. cpu, you can set it to cpu: "unlimited". + limits: + cpu: 1000m + memory: 2Gi + # be storageSpec for persistent storage. + # Note: Once set, the following fields will not be allowed to be modified. + storageSpec: + # Specifies the name prefix of the volumes to mount. 
If left unspecified,
+ # `emptyDir` volumes will be used by default, which are ephemeral and data
+ will be lost on pod restart.
+ #
+ # For persistent storage, specify a volume name prefix.
+ # For example, using `be` as the name prefix would be appropriate.
+ # Note: If the values of the following mountPaths are not default, you must set the name to be.
+ name: ""
+ # The storageClassName represent the used storageclass name. if not set will use k8s cluster default storageclass.
+ # You must set name when you set storageClassName
+ # Note: Because hostPath field is not supported here, hostPath is not allowed to be set in storageClassName.
+ storageClassName: "azure-disk-std-lrs"
+ # the storage size of per persistent volume for data.
+ storageSize: 25Gi
+ # the number of persistent volumes for data.
+ # if storageCount == 1
+ # the storageMountPath field is used to specify the mount path of the persistent volume. If storageMountPath is empty,
+ # the storageMountPath will be set to /opt/starrocks/be/storage.
+ # If storageMountPath is not /opt/starrocks/be/storage, you must add in config the following configuration: storage_root_path = xxx.
+ # if storageCount > 1
+ # the storageMountPath field is used to specify the prefix of mount path of the persistent volume. For example, if the
+ # storageMountPath is /opt/starrocks/be/storage, the real mount path will be /opt/starrocks/be/storage0, /opt/starrocks/be/storage1, ...
+ # You must add in config the following configuration: storage_root_path = /opt/starrocks/be/storage0;/opt/starrocks/be/storage1;...
+ storageCount: 1
+ # see the comment of storageCount for the usage of storageMountPath.
+ storageMountPath: ""
+ # If not set will use the value of the storageClassName field.
+ logStorageClassName: "azure-disk-std-lrs"
+ # Setting this parameter can persist log storage, and the mount path is /opt/starrocks/be/log.
+ # If you set it to 0Gi, the related PVC will not be created, and the log will not be persisted.
+ logStorageSize: 10Gi + # If logMountPath is empty, the logMountPath will be set to /opt/starrocks/be/log. + # If logMountPath is not /opt/starrocks/be/log, you must add in config the following configuration: sys_log_dir = xxx. + logMountPath: "" + # If not set will use the value of the storageClassName field. + spillStorageClassName: "azure-disk-std-lrs" + # Setting this parameter can persist spill storage, and the mount path is /opt/starrocks/be/spill. + # If you set it to 0Gi, the related PVC will not be created, and the spill will not be persisted. + # You need to add spill_local_storage_dir=/opt/starrocks/be/spill in be.conf. + spillStorageSize: 0Gi + # If spillMountPath is empty, the spillMountPath will be set to /opt/starrocks/be/spill. + # If spillMountPath is not /opt/starrocks/be/spill, you must add in config the following configuration: spill_local_storage_dir = xxx. + spillMountPath: "" + # mount emptyDir volumes if necessary. + # Note: please use storageSpec field for persistent storage data and log. + emptyDirs: [] + # e.g. mount an emptyDir volume to /tmp + # - name: tmp-data + # mountPath: /tmp + # mount hostPath volumes if necessary. + # Note: please use storageSpec field for persistent storage data and log. + hostPaths: [] + # e.g. mount a hostPath volume to /tmp + # - name: tmp-data + # hostPath: + # path: /tmp + # type: Directory + # mountPath: /tmp + # the config for start be. the base information as follows. + # From StarRocks 3.1, the official documentation use: + # 1. be_http_port instead of webserver_port, but the webserver_port is still supported. + # In order to avoid the impact of the change on the user's deployment, we still use the old configuration. + config: | + be_port = 9060 + webserver_port = 8040 + heartbeat_service_port = 9050 + brpc_port = 8060 + sys_log_level = INFO + default_rowset_type = beta + # A map object for setting the config. 
When configyaml is set to non-empty, the configs in configyaml will take
+ # precedence and values in config field will be discarded.
+ # Note: When using configyaml, the number needs to be quoted to avoid being converted to scientific notation.
+ # e.g. brpc_socket_max_unwritten_bytes: "10737418240"
+ configyaml: {}
+ # mount secrets if necessary.
+ # see https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath for more details about subPath.
+ secrets: []
+ # e.g. mount my-secret to /etc/my-secret
+ # - name: my-secret
+ # mountPath: /etc/my-secret
+ # subPath: ""
+ # mount configmaps if necessary.
+ # see https://kubernetes.io/docs/concepts/storage/volumes/#using-subpath for more details about subPath.
+ configMaps: []
+ # e.g. mount my-configmap to /etc/my-configmap
+ # - name: my-configmap
+ # mountPath: /etc/my-configmap
+ # subPath: ""
+ # terminationGracePeriodSeconds defines duration in seconds the BE pod needs to terminate gracefully.
+ # default value is 120 seconds
+ terminationGracePeriodSeconds: 120
+
+ # Please upgrade the CRD with v1.8.7 released version, if you want to use the following configuration.
+ # including: startupProbeFailureSeconds, livenessProbeFailureSeconds, readinessProbeFailureSeconds
+
+ # StartupProbeFailureSeconds defines the total failure seconds of startup Probe.
+ # default value is 300 seconds
+ # You can set it to "0" to disable the probe.
+ startupProbeFailureSeconds:
+ # LivenessProbeFailureSeconds defines the total failure seconds of liveness Probe.
+ # default value is 15 seconds
+ # You can set it to "0" to disable the probe.
+ livenessProbeFailureSeconds:
+ # ReadinessProbeFailureSeconds defines the total failure seconds of readiness Probe.
+ # default value is 15 seconds
+ # You can set it to "0" to disable the probe.
+ readinessProbeFailureSeconds:
+ # Lifecycle describes actions that the management system should take in response to container lifecycle events.
+ # By default, Operator will add corresponding preStop hooks for different components. For example, the preStop + # script for the FE Component is /opt/starrocks/fe_prestop.sh, for the BE Component is /opt/starrocks/be_prestop.sh, + # and for the CN Component is /opt/starrocks/cn_prestop.sh. + # You can just set postStart hook. + lifecycle: {} + # postStart: + # exec: + # command: + # - /bin/sh + # - -c + # - echo "Hello, world!" + # Sidecars is an optional list of containers that are run in the same pod as the starrocks component. + # You can use this field to launch helper containers that provide additional functionality to the main container. + # See https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container for how to define sidecars. + sidecars: [] + # - name: sidecar-container + # image: busybox + # # If your starrocksBeSpec.storageSpec.name is be or not set, you can mount the volume of data like this. + # command: ['sh', '-c', 'echo "hello from the sidecar container" >/opt/starrocks/be/storage/sidecar-data.txt && sleep 3600'] + # volumeMounts: + # - mountPath: /opt/starrocks/be/storage + # name: be-data # append -data to the end of the name of the starrocksBeSpec.storageSpec.name + # sidecarsMap is an optional map of containers that are run in the same pod as the starrocks component. + # The reason for using sidecarsMap please refer to https://github.com/StarRocks/starrocks-kubernetes-operator/issues/618 + # sidecarsMap has higher priority than sidecars, and the key of sidecarsMap is the name of the sidecar container. + sidecarsMap: {} + # sidecar-container: + # image: busybox + # # If your starrocksBeSpec.storageSpec.name is be or not set, you can mount the volume of data like this. 
+ # command: ['sh', '-c', 'echo "hello from the sidecar container" >/opt/starrocks/be/storage/sidecar-data.txt && sleep 3600'] + # volumeMounts: + # - mountPath: /opt/starrocks/be/storage + # name: be-data # append -data to the end of the name of the starrocksBeSpec.storageSpec.name + # initContainers is an optional list of containers that are run in the same pod as the starrocks component. + # You can use this to launch helper containers that run before the main container starts. + # See https://kubernetes.io/docs/reference/kubernetes-api/workload-resources/pod-v1/#Container for how to configure a container. + initContainers: [] + # - name: init-container + # image: busybox + # # If your starrocksBeSpec.storageSpec.name is be or not set, you can mount the volume of data like this. + # command: ['sh', '-c', 'echo "hello from the init container" >/opt/starrocks/be/storage/init-data.txt'] + # volumeMounts: + # - mountPath: /opt/starrocks/be/storage + # name: be-data # append -data to the end of the name of the starrocksBeSpec.storageSpec.name + # Max unavailable pods for the be component when doing rolling update. + # This field cannot be 0. The default setting is 1. + # Note: Because Operator uses statefulset to manage this component, the maxUnavailable field is in Alpha stage, and it is honored + # only by API servers that are running with the MaxUnavailableStatefulSet feature gate enabled. + # See https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#maximum-unavailable-pods for more details. + maxUnavailablePods: + # Share a single process namespace between all of the containers in a pod. + # When this is set containers will be able to view and signal processes from other containers + # in the same pod, and the first process in each container will not be assigned PID 1. + shareProcessNamespace: + +# create secrets if necessary. +secrets: [] + # e.g. 
create my-secret + # - name: my-secret + # data: + # key: | + # this is the content of the secret + # when mounted, key will be the name of the file + +# create configmaps if necessary. +configMaps: [] + # e.g. create my-configmap + # - name: my-configmap + # data: + # key: | + # this is the content of the configmap + # when mounted, key will be the name of the file + +# If you needs to deploy other resources, e.g. serviceAccount, you can add them here. +# You can even deploy resources to different namespaces +resources: [] + # - apiVersion: v1 + # kind: ServiceAccount + # metadata: + # name: sa-for-starrocks + # namespace: starrocks + +# specify the fe proxy deploy or not. +starrocksFeProxySpec: + # specify the fe proxy deploy or not. + enabled: false + replicas: 1 + imagePullPolicy: IfNotPresent + # default nginx:1.24.0 + image: + repository: "" + tag: "" + resources: + requests: + cpu: 1 + memory: 2Gi + limits: + cpu: 1 + memory: 2Gi + # set the resolver for nginx server, default kube-dns.kube-system.svc.cluster.local + resolver: "" + service: + # the fe proxy service type, only supported ClusterIP, NodePort, LoadBalancer + # default ClusterIP + type: ClusterIP + # the loadBalancerIP for static ip config when the type=LoadBalancer and loadbalancerIp is not empty. + loadbalancerIP: "" + # add annotations for external fe proxy service. + annotations: {} + # Add labels for external fe proxy service. The operator may add its own default labels. + labels: {} + # config the service port for fe proxy service. + # To assign a specific port or nodePort to a service, you should specify them by the corresponding name or + # containerPort in the service configuration. If both containerPort and name are specified, containerPort takes precedence. + # For fe proxy, port name can be http-port, and its default container port is 8080. + ports: [] + # e.g. specify a dedicated node port for fe proxy service by containerPort. 
+ # - nodePort: 30080 # The range of valid ports is 30000-32767 + # containerPort: 8080 # The port on the container to expose + # specify the source IP ranges for the load balancer when the type=LoadBalancer. + loadBalancerSourceRanges: [] + # - 10.0.0.0/8 + # imagePullSecrets allows you to use secrets for pulling images for your pods. + imagePullSecrets: [] + # - name: "image-pull-secret" + # If specified, the pod's nodeSelector,displayName="Map of nodeSelectors to match when scheduling pods on nodes" + # Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector + nodeSelector: {} + # kubernetes.io/arch: amd64 + # kubernetes.io/os: linux + # affinity for fe proxy pod scheduling. + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchFields: + # - key: metadata.name + # operator: In + # values: + # - target-host-name + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/component + # operator: In + # values: + # - fe-proxy + # topologyKey: "kubernetes.io/hostname" + # Node tolerations for fe proxy pod scheduling to nodes with taints + # Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # Please upgrade the CRD with v1.8.7 released version, if you want to use the following configuration. + # including: livenessProbeFailureSeconds, readinessProbeFailureSeconds + + # LivenessProbeFailureSeconds defines the total failure seconds of liveness Probe. + # default value is 15 seconds + # You can set it to "0" to disable the probe. + livenessProbeFailureSeconds: + # ReadinessProbeFailureSeconds defines the total failure seconds of readiness Probe. 
+ # default value is 15 seconds + # You can set it to "0" to disable the probe. + readinessProbeFailureSeconds: + # Note: will create emptyDir volume for fe proxy, PVC is not supported. + emptyDirs: [] + # e.g. mount an emptyDir volume to /tmp + # - name: tmp-data + # mountPath: /tmp \ No newline at end of file diff --git a/cluster/manifests/freeleaps-data-platform/star-rocks/vpa.yaml b/cluster/manifests/freeleaps-data-platform/star-rocks/vpa.yaml new file mode 100644 index 00000000..b246f09f --- /dev/null +++ b/cluster/manifests/freeleaps-data-platform/star-rocks/vpa.yaml @@ -0,0 +1,43 @@ +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: starrocks-fe-vpa + namespace: freeleaps-data-platform +spec: + resourcePolicy: + containerPolicies: + - containerName: '*' + controlledResources: + - cpu + - memory + maxAllowed: + cpu: 500m + memory: 2Gi + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: freeleaps-starrocks-fe + updatePolicy: + updateMode: "Auto" +--- +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: starrocks-be-vpa + namespace: freeleaps-data-platform +spec: + resourcePolicy: + containerPolicies: + - containerName: '*' + controlledResources: + - cpu + - memory + maxAllowed: + cpu: 500m + memory: 2Gi + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: freeleaps-starrocks-be + updatePolicy: + updateMode: "Auto" \ No newline at end of file diff --git a/cluster/manifests/freeleaps-data-platform/zookeeper/values.yaml b/cluster/manifests/freeleaps-data-platform/zookeeper/values.yaml new file mode 100644 index 00000000..874fd3fc --- /dev/null +++ b/cluster/manifests/freeleaps-data-platform/zookeeper/values.yaml @@ -0,0 +1,1031 @@ +# Copyright Broadcom, Inc. All Rights Reserved. 
+# SPDX-License-Identifier: APACHE-2.0 + +## @section Global parameters +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry, imagePullSecrets and storageClass +## + +## @param global.imageRegistry Global Docker image registry +## @param global.imagePullSecrets Global Docker registry secret names as an array +## @param global.defaultStorageClass Global default StorageClass for Persistent Volume(s) +## @param global.storageClass DEPRECATED: use global.defaultStorageClass instead +## +global: + imageRegistry: "" + ## E.g. + ## imagePullSecrets: + ## - myRegistryKeySecretName + ## + imagePullSecrets: [] + defaultStorageClass: "azure-disk-std-lrs" + storageClass: "azure-disk-std-lrs" + ## Security parameters + ## + security: + ## @param global.security.allowInsecureImages Allows skipping image verification + allowInsecureImages: false + ## Compatibility adaptations for Kubernetes platforms + ## + compatibility: + ## Compatibility adaptations for Openshift + ## + openshift: + ## @param global.compatibility.openshift.adaptSecurityContext Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs. 
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) + ## + adaptSecurityContext: auto +## @section Common parameters +## + +## @param kubeVersion Override Kubernetes version +## +kubeVersion: "" +## @param nameOverride String to partially override common.names.fullname template (will maintain the release name) +## +nameOverride: "" +## @param fullnameOverride String to fully override common.names.fullname template +## +fullnameOverride: "" +## @param clusterDomain Kubernetes Cluster Domain +## +clusterDomain: freeleaps.cluster +## @param extraDeploy Extra objects to deploy (evaluated as a template) +## +extraDeploy: [] +## @param commonLabels Add labels to all the deployed resources +## +commonLabels: {} +## @param commonAnnotations Add annotations to all the deployed resources +## +commonAnnotations: {} +## @param namespaceOverride Override namespace for ZooKeeper resources +## Useful when including ZooKeeper as a chart dependency, so it can be released into a different namespace than the parent +## +namespaceOverride: "" +## @param usePasswordFiles Mount credentials as files instead of using environment variables +## +usePasswordFiles: true +## Enable diagnostic mode in the statefulset +## +diagnosticMode: + ## @param diagnosticMode.enabled Enable diagnostic mode (all probes will be disabled and the command will be overridden) + ## + enabled: false + ## @param diagnosticMode.command Command to override all containers in the statefulset + ## + command: + - sleep + ## @param diagnosticMode.args Args to override all containers in the statefulset + ## + args: + - infinity +## @section ZooKeeper chart parameters + +## Bitnami ZooKeeper image version +## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ +## @param image.registry [default: REGISTRY_NAME] ZooKeeper image registry +## @param image.repository [default: REPOSITORY_NAME/zookeeper] ZooKeeper image 
repository +## @skip image.tag ZooKeeper image tag (immutable tags are recommended) +## @param image.digest ZooKeeper image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag +## @param image.pullPolicy ZooKeeper image pull policy +## @param image.pullSecrets Specify docker-registry secret names as an array +## @param image.debug Specify if debug values should be set +## +image: + registry: docker.io + repository: bitnami/zookeeper + tag: 3.9.3-debian-12-r16 + digest: "" + ## Specify a imagePullPolicy + ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Set to true if you would like to see extra information on logs + ## + debug: false +## Authentication parameters +## +auth: + client: + ## @param auth.client.enabled Enable ZooKeeper client-server authentication. 
It uses SASL/Digest-MD5 + ## + enabled: false + ## @param auth.client.clientUser User that will use ZooKeeper clients to auth + ## + clientUser: "" + ## @param auth.client.clientPassword Password that will use ZooKeeper clients to auth + ## + clientPassword: "" + ## @param auth.client.serverUsers Comma, semicolon or whitespace separated list of user to be created + ## Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param auth.client.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created + ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## @param auth.client.existingSecret Use existing secret (ignores previous passwords) + ## + existingSecret: "" + quorum: + ## @param auth.quorum.enabled Enable ZooKeeper server-server authentication. It uses SASL/Digest-MD5 + ## + enabled: false + ## @param auth.quorum.learnerUser User that the ZooKeeper quorumLearner will use to authenticate to quorumServers. + ## Note: Make sure the user is included in auth.quorum.serverUsers + ## + learnerUser: "" + ## @param auth.quorum.learnerPassword Password that the ZooKeeper quorumLearner will use to authenticate to quorumServers. + ## + learnerPassword: "" + ## @param auth.quorum.serverUsers Comma, semicolon or whitespace separated list of users for the quorumServers. 
+ ## Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: "" + ## @param auth.quorum.serverPasswords Comma, semicolon or whitespace separated list of passwords to assign to users when created + ## Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: "" + ## @param auth.quorum.existingSecret Use existing secret (ignores previous passwords) + ## + existingSecret: "" +## @param tickTime Basic time unit (in milliseconds) used by ZooKeeper for heartbeats +## +tickTime: 2000 +## @param initLimit ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader +## +initLimit: 10 +## @param syncLimit How far out of date a server can be from a leader +## +syncLimit: 5 +## @param preAllocSize Block size for transaction log file +## +preAllocSize: 65536 +## @param snapCount The number of transactions recorded in the transaction log before a snapshot can be taken (and the transaction log rolled) +## +snapCount: 100000 +## @param maxClientCnxns Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble +## +maxClientCnxns: 60 +## @param maxSessionTimeout Maximum session timeout (in milliseconds) that the server will allow the client to negotiate +## Defaults to 20 times the tickTime +## +maxSessionTimeout: 40000 +## @param heapSize Size (in MB) for the Java Heap options (Xmx and Xms) +## This env var is ignored if Xmx an Xms are configured via `jvmFlags` +## +heapSize: 1024 +## @param fourlwCommandsWhitelist A list of comma separated Four Letter Words commands that can be executed +## +fourlwCommandsWhitelist: srvr, mntr, ruok +## @param minServerId Minimal SERVER_ID value, nodes increment their IDs respectively +## Servers increment their ID starting at this minimal value. +## E.g., with `minServerId=10` and 3 replicas, server IDs will be 10, 11, 12 for z-0, z-1 and z-2 respectively. 
+## +minServerId: 1 +## @param listenOnAllIPs Allow ZooKeeper to listen for connections from its peers on all available IP addresses +## +listenOnAllIPs: false +## @param zooServers ZooKeeper space separated servers list. Leave empty to use the default ZooKeeper server names. +## +zooServers: "" +## Ongoing data directory cleanup configuration +## +autopurge: + ## @param autopurge.snapRetainCount The most recent snapshots amount (and corresponding transaction logs) to retain + ## + snapRetainCount: 10 + ## @param autopurge.purgeInterval The time interval (in hours) for which the purge task has to be triggered + ## Set to a positive integer to enable the auto purging. Set to 0 to disable auto purging. + ## + purgeInterval: 1 +## @param logLevel Log level for the ZooKeeper server. ERROR by default +## Keep in mind if you set it to INFO or WARN the ReadinessProbe will produce a lot of logs +## +logLevel: ERROR +## @param jvmFlags Default JVM flags for the ZooKeeper process +## +jvmFlags: "" +## @param dataLogDir Dedicated data log directory +## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots. +## E.g. +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" +## @param configuration Configure ZooKeeper with a custom zoo.cfg file +## e.g: +## configuration: |- +## deploy-working-dir=/bitnami/zookeeper/data +## log-level=info +## ... 
+## +configuration: "" +## @param existingConfigmap The name of an existing ConfigMap with your custom configuration for ZooKeeper +## NOTE: When it's set the `configuration` parameter is ignored +## +existingConfigmap: "" +## @param extraEnvVars Array with extra environment variables to add to ZooKeeper nodes +## e.g: +## extraEnvVars: +## - name: FOO +## value: "bar" +## +extraEnvVars: [] +## @param extraEnvVarsCM Name of existing ConfigMap containing extra env vars for ZooKeeper nodes +## +extraEnvVarsCM: "" +## @param extraEnvVarsSecret Name of existing Secret containing extra env vars for ZooKeeper nodes +## +extraEnvVarsSecret: "" +## @param command Override default container command (useful when using custom images) +## +command: + - /scripts/setup.sh +## @param args Override default container args (useful when using custom images) +## +args: [] +## @section Statefulset parameters + +## @param replicaCount Number of ZooKeeper nodes +## +replicaCount: 1 +## @param revisionHistoryLimit The number of old history to retain to allow rollback +## +revisionHistoryLimit: 10 +## @param containerPorts.client ZooKeeper client container port +## @param containerPorts.tls ZooKeeper TLS container port +## @param containerPorts.follower ZooKeeper follower container port +## @param containerPorts.election ZooKeeper election container port +## @param containerPorts.adminServer ZooKeeper admin server container port +## @param containerPorts.metrics ZooKeeper Prometheus Exporter container port +## +containerPorts: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 + adminServer: 8080 + metrics: 9141 +## Configure extra options for ZooKeeper containers' liveness, readiness and startup probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes +## @param livenessProbe.enabled Enable livenessProbe on ZooKeeper containers +## @param livenessProbe.initialDelaySeconds Initial delay seconds for 
livenessProbe +## @param livenessProbe.periodSeconds Period seconds for livenessProbe +## @param livenessProbe.timeoutSeconds Timeout seconds for livenessProbe +## @param livenessProbe.failureThreshold Failure threshold for livenessProbe +## @param livenessProbe.successThreshold Success threshold for livenessProbe +## @param livenessProbe.probeCommandTimeout Probe command timeout for livenessProbe +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 3 +## @param readinessProbe.enabled Enable readinessProbe on ZooKeeper containers +## @param readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe +## @param readinessProbe.periodSeconds Period seconds for readinessProbe +## @param readinessProbe.timeoutSeconds Timeout seconds for readinessProbe +## @param readinessProbe.failureThreshold Failure threshold for readinessProbe +## @param readinessProbe.successThreshold Success threshold for readinessProbe +## @param readinessProbe.probeCommandTimeout Probe command timeout for readinessProbe +## +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 +## @param startupProbe.enabled Enable startupProbe on ZooKeeper containers +## @param startupProbe.initialDelaySeconds Initial delay seconds for startupProbe +## @param startupProbe.periodSeconds Period seconds for startupProbe +## @param startupProbe.timeoutSeconds Timeout seconds for startupProbe +## @param startupProbe.failureThreshold Failure threshold for startupProbe +## @param startupProbe.successThreshold Success threshold for startupProbe +## +startupProbe: + enabled: false + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 1 + failureThreshold: 15 + successThreshold: 1 +## @param customLivenessProbe Custom livenessProbe that overrides the default one +## 
+customLivenessProbe: {} +## @param customReadinessProbe Custom readinessProbe that overrides the default one +## +customReadinessProbe: {} +## @param customStartupProbe Custom startupProbe that overrides the default one +## +customStartupProbe: {} +## @param lifecycleHooks for the ZooKeeper container(s) to automate configuration before or after startup +## +lifecycleHooks: {} +## ZooKeeper resource requests and limits +## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ +## @param resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). +## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 +## +resourcesPreset: "micro" +## @param resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) +## Example: +## resources: +## requests: +## cpu: 2 +## memory: 512Mi +## limits: +## cpu: 3 +## memory: 1024Mi +## +resources: + requests: + cpu: 200m + memory: 256Mi + limits: + cpu: 500m + memory: 1Gi +## Configure Pods Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## @param podSecurityContext.enabled Enabled ZooKeeper pods' Security Context +## @param podSecurityContext.fsGroupChangePolicy Set filesystem group change policy +## @param podSecurityContext.sysctls Set kernel settings using the sysctl interface +## @param podSecurityContext.supplementalGroups Set filesystem extra groups +## @param podSecurityContext.fsGroup Set ZooKeeper pod's Security Context fsGroup +## +podSecurityContext: + enabled: true + fsGroupChangePolicy: Always + sysctls: [] + supplementalGroups: [] + fsGroup: 1001 +## Configure Container Security Context +## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## @param containerSecurityContext.enabled Enabled containers' Security Context +## @param containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container +## @param containerSecurityContext.runAsUser Set containers' Security Context runAsUser +## @param containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup +## @param containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot +## @param containerSecurityContext.privileged Set container's Security Context privileged +## @param containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem +## @param containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation +## @param containerSecurityContext.capabilities.drop List of capabilities to be dropped +## @param containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile +## +containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + type: "RuntimeDefault" +## @param automountServiceAccountToken Mount Service Account token in pod +## +automountServiceAccountToken: false +## @param hostAliases ZooKeeper pods host aliases +## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ +## +hostAliases: [] +## @param podLabels Extra labels for ZooKeeper pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} +## @param podAnnotations Annotations for ZooKeeper pods +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} 
+## @param podAffinityPreset Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAffinityPreset: "" +## @param podAntiAffinityPreset Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity +## +podAntiAffinityPreset: soft +## Node affinity preset +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity +## +nodeAffinityPreset: + ## @param nodeAffinityPreset.type Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param nodeAffinityPreset.key Node label key to match Ignored if `affinity` is set. + ## E.g. + ## key: "kubernetes.io/e2e-az-name" + ## + key: "" + ## @param nodeAffinityPreset.values Node label values to match. Ignored if `affinity` is set. + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] +## @param affinity Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set +## +affinity: {} +## @param nodeSelector Node labels for pod assignment +## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ +## +nodeSelector: {} +## @param tolerations Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] +## @param topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods +## +topologySpreadConstraints: [] +## @param podManagementPolicy StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: `OrderedReady` and `Parallel` +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel +## @param priorityClassName Name of the existing priority class to be used by ZooKeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" +## @param schedulerName Kubernetes pod scheduler registry +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +schedulerName: "" +## @param updateStrategy.type ZooKeeper statefulset strategy type +## @param updateStrategy.rollingUpdate ZooKeeper statefulset rolling update configuration parameters +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + rollingUpdate: {} +## @param extraVolumes Optionally specify extra list of additional volumes for the ZooKeeper pod(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumes: +## - name: zookeeper-keystore +## secret: +## defaultMode: 288 +## secretName: zookeeper-keystore +## - name: zookeeper-truststore +## secret: +## defaultMode: 288 +## secretName: zookeeper-truststore +## +extraVolumes: [] +## @param extraVolumeMounts Optionally specify extra list of additional volumeMounts for the ZooKeeper container(s) +## Example Use Case: mount certificates to enable TLS +## e.g: +## extraVolumeMounts: +## - name: zookeeper-keystore +## mountPath: /certs/keystore +## readOnly: true +## - 
name: zookeeper-truststore +## mountPath: /certs/truststore +## readOnly: true +## +extraVolumeMounts: [] +## @param sidecars Add additional sidecar containers to the ZooKeeper pod(s) +## e.g: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: [] +## @param initContainers Add additional init containers to the ZooKeeper pod(s) +## Example: +## initContainers: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +initContainers: [] +## ZooKeeper Pod Disruption Budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## @param pdb.create Deploy a pdb object for the ZooKeeper pod +## @param pdb.minAvailable Minimum available ZooKeeper replicas +## @param pdb.maxUnavailable Maximum unavailable ZooKeeper replicas. Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty. +## +pdb: + create: true + minAvailable: "" + maxUnavailable: "" +## @param enableServiceLinks Whether information about services should be injected into pod's environment variable +## The environment variables injected by service links are not used, but can lead to slow boot times or slow running of the scripts when there are many services in the current namespace. +## If you experience slow pod startups or slow running of the scripts you probably want to set this to `false`. +## +enableServiceLinks: true +## DNS-Pod services +## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/ +## @param dnsPolicy Specifies the DNS policy for the zookeeper pods +## DNS policies can be set on a per-Pod basis. Currently Kubernetes supports the following Pod-specific DNS policies. 
+## Available options: Default, ClusterFirst, ClusterFirstWithHostNet, None +## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy +dnsPolicy: "" +## @param dnsConfig allows users more control on the DNS settings for a Pod. Required if `dnsPolicy` is set to `None` +## The dnsConfig field is optional and it can work with any dnsPolicy settings. +## Ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config +## E.g. +## dnsConfig: +## nameservers: +## - 192.0.2.1 # this is an example +## searches: +## - ns1.svc.cluster-domain.example +## - my.dns.search.suffix +## options: +## - name: ndots +## value: "2" +## - name: edns0 +dnsConfig: {} +## @section Traffic Exposure parameters +service: + ## @param service.type Kubernetes Service type + ## + type: ClusterIP + ## @param service.ports.client ZooKeeper client service port + ## @param service.ports.tls ZooKeeper TLS service port + ## @param service.ports.follower ZooKeeper follower service port + ## @param service.ports.election ZooKeeper election service port + ## + ports: + client: 2181 + tls: 3181 + follower: 2888 + election: 3888 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param service.nodePorts.client Node port for clients + ## @param service.nodePorts.tls Node port for TLS + ## + nodePorts: + client: "" + tls: "" + ## @param service.disableBaseClientPort Remove client port from service definitions. 
+ ## + disableBaseClientPort: false + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP ZooKeeper service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP ZooKeeper service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges ZooKeeper service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy ZooKeeper service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.annotations Additional custom annotations for ZooKeeper service + ## + annotations: {} + ## @param service.extraPorts Extra ports to expose in the ZooKeeper service (normally used with the `sidecar` value) + ## + extraPorts: [] + ## @param service.headless.annotations Annotations for the Headless Service + ## @param service.headless.publishNotReadyAddresses If the ZooKeeper headless service should publish DNS records for not ready pods + ## @param service.headless.servicenameOverride String to partially override headless service name + ## + headless: + publishNotReadyAddresses: true + annotations: {} + servicenameOverride: "" +## 
Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## @param networkPolicy.enabled Specifies whether a NetworkPolicy should be created + ## + enabled: true + ## @param networkPolicy.allowExternal Don't require client label for connections + ## When set to false, only pods with the correct client label will have network access to the port ZooKeeper is + ## listening on. When true, ZooKeeper accepts connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param networkPolicy.allowExternalEgress Allow the pod to access any range of port and all destinations. + ## + allowExternalEgress: true + ## @param networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + extraIngress: [] + ## @param networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} +## @section Other Parameters + +## Service account for ZooKeeper to use. 
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## @param serviceAccount.create Enable creation of ServiceAccount for ZooKeeper pod + ## + create: true + ## @param serviceAccount.name The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ +## +persistence: + ## @param persistence.enabled Enable ZooKeeper data persistence using PVC. If false, use emptyDir + ## + enabled: true + ## @param persistence.existingClaim Name of an existing PVC to use (only when deploying a single replica) + ## + existingClaim: "" + ## @param persistence.storageClass PVC Storage Class for ZooKeeper data volume + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + storageClass: "" + ## @param persistence.accessModes PVC Access modes + ## + accessModes: + - ReadWriteOnce + ## @param persistence.size PVC Storage Request for ZooKeeper data volume + ## + size: 8Gi + ## @param persistence.annotations Annotations for the PVC + ## + annotations: {} + ## @param persistence.labels Labels for the PVC + ## + labels: {} + ## @param persistence.selector Selector to match an existing Persistent Volume for ZooKeeper's data PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. + ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} + ## Persistence for a dedicated data log directory + ## + dataLogDir: + ## @param persistence.dataLogDir.size PVC Storage Request for ZooKeeper's dedicated data log directory + ## + size: 8Gi + ## @param persistence.dataLogDir.existingClaim Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + existingClaim: "" + ## @param persistence.dataLogDir.selector Selector to match an existing Persistent Volume for ZooKeeper's data log PVC + ## If set, the PVC can't have a PV dynamically provisioned for it + ## E.g. 
+ ## selector: + ## matchLabels: + ## app: my-app + ## + selector: {} +## @section Volume Permissions parameters +## + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +## +volumePermissions: + ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume + ## + enabled: false + ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry + ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image repository + ## @skip volumePermissions.image.tag Init container volume-permissions image tag (immutable tags are recommended) + ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy + ## @param volumePermissions.image.pullSecrets Init container volume-permissions image pull secrets + ## + image: + registry: docker.io + repository: bitnami/os-shell + tag: 12-debian-12-r45 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param volumePermissions.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if volumePermissions.resources is set (volumePermissions.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param volumePermissions.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## Init container' Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.containerSecurityContext.runAsUser + ## @param volumePermissions.containerSecurityContext.enabled Enabled init container Security Context + ## @param volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param volumePermissions.containerSecurityContext.runAsUser User ID for the init container + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 0 +## @section Metrics parameters +## + +## ZooKeeper Prometheus Exporter configuration +## +metrics: + ## @param metrics.enabled Enable Prometheus to access ZooKeeper metrics endpoint + ## + enabled: false + ## Service configuration + ## + service: + ## @param metrics.service.type ZooKeeper Prometheus Exporter service type + ## + type: ClusterIP + ## @param metrics.service.port ZooKeeper Prometheus Exporter service port + ## + port: 9141 + ## @param metrics.service.annotations [object] Annotations for Prometheus to auto-discover the metrics endpoint + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled Create ServiceMonitor 
Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.additionalLabels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + additionalLabels: {} + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + ## @param metrics.serviceMonitor.scheme The explicit scheme for metrics scraping. 
+    ##
+    scheme: ""
+    ## @param metrics.serviceMonitor.tlsConfig [object] TLS configuration used for scrape endpoints used by Prometheus
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#tlsconfig
+    ## e.g:
+    ## tlsConfig:
+    ##   ca:
+    ##     secret:
+    ##       name: existingSecretName
+    ##
+    tlsConfig: {}
+  ## Prometheus Operator PrometheusRule configuration
+  ##
+  prometheusRule:
+    ## @param metrics.prometheusRule.enabled Create a PrometheusRule for Prometheus Operator
+    ##
+    enabled: false
+    ## @param metrics.prometheusRule.namespace Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ##
+    namespace: ""
+    ## @param metrics.prometheusRule.additionalLabels Additional labels that can be used so PrometheusRule will be discovered by Prometheus
+    ##
+    additionalLabels: {}
+    ## @param metrics.prometheusRule.rules PrometheusRule definitions
+    ##  - alert: ZooKeeperSyncedFollowers
+    ##    annotations:
+    ##      message: The number of synced followers for the leader node in ZooKeeper deployment my-release is less than 2. This usually means that some of the ZooKeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
+    ##    expr: max(synced_followers{service="my-release-metrics"}) < 2
+    ##    for: 5m
+    ##    labels:
+    ##      severity: critical
+    ##  - alert: ZooKeeperOutstandingRequests
+    ##    annotations:
+    ##      message: The number of outstanding requests for ZooKeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or the cluster as a whole.
+    ##    expr: outstanding_requests{service="my-release-metrics"} > 10
+    ##    for: 5m
+    ##    labels:
+    ##      severity: critical
+    ##
+    rules: []
+## @section TLS/SSL parameters
+##
+
+## Enable SSL/TLS encryption
+##
+tls:
+  client:
+    ## @param tls.client.enabled Enable TLS for client connections
+    ##
+    enabled: false
+    ## @param tls.client.auth SSL Client auth. Can be "none", "want" or "need". 
+ ## + auth: "none" + ## @param tls.client.autoGenerated Generate automatically self-signed TLS certificates for ZooKeeper client communications + ## Currently only supports PEM certificates + ## + autoGenerated: false + ## @param tls.client.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper client communications + ## + existingSecret: "" + ## @param tls.client.existingSecretKeystoreKey The secret key from the tls.client.existingSecret containing the Keystore. + ## + existingSecretKeystoreKey: "" + ## @param tls.client.existingSecretTruststoreKey The secret key from the tls.client.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: "" + ## @param tls.client.keystorePath Location of the KeyStore file used for Client connections + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.keystore.jks + ## @param tls.client.truststorePath Location of the TrustStore file used for Client connections + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/client/zookeeper.truststore.jks + ## @param tls.client.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.client.passwordsSecretKeystoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.client.passwordsSecretTruststoreKey The secret key from the tls.client.passwordsSecretName containing the password for the Truststore. + ## + passwordsSecretTruststoreKey: "" + ## @param tls.client.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.client.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + quorum: + ## @param tls.quorum.enabled Enable TLS for quorum protocol + ## + enabled: false + ## @param tls.quorum.auth SSL Quorum Client auth. Can be "none", "want" or "need". 
+ ## + auth: "none" + ## @param tls.quorum.autoGenerated Create self-signed TLS certificates. Currently only supports PEM certificates. + ## + autoGenerated: false + ## @param tls.quorum.existingSecret Name of the existing secret containing the TLS certificates for ZooKeeper quorum protocol + ## + existingSecret: "" + ## @param tls.quorum.existingSecretKeystoreKey The secret key from the tls.quorum.existingSecret containing the Keystore. + ## + existingSecretKeystoreKey: "" + ## @param tls.quorum.existingSecretTruststoreKey The secret key from the tls.quorum.existingSecret containing the Truststore. + ## + existingSecretTruststoreKey: "" + ## @param tls.quorum.keystorePath Location of the KeyStore file used for Quorum protocol + ## + keystorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.keystore.jks + ## @param tls.quorum.truststorePath Location of the TrustStore file used for Quorum protocol + ## + truststorePath: /opt/bitnami/zookeeper/config/certs/quorum/zookeeper.truststore.jks + ## @param tls.quorum.passwordsSecretName Existing secret containing Keystore and truststore passwords + ## + passwordsSecretName: "" + ## @param tls.quorum.passwordsSecretKeystoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Keystore. + ## + passwordsSecretKeystoreKey: "" + ## @param tls.quorum.passwordsSecretTruststoreKey The secret key from the tls.quorum.passwordsSecretName containing the password for the Truststore. 
+ ## + passwordsSecretTruststoreKey: "" + ## @param tls.quorum.keystorePassword Password to access KeyStore if needed + ## + keystorePassword: "" + ## @param tls.quorum.truststorePassword Password to access TrustStore if needed + ## + truststorePassword: "" + ## Init container resource requests and limits + ## ref: https://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param tls.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if tls.resources is set (tls.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param tls.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) + ## Example: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} diff --git a/cluster/manifests/freeleaps-data-platform/zookeeper/vpa.yaml b/cluster/manifests/freeleaps-data-platform/zookeeper/vpa.yaml new file mode 100644 index 00000000..f95ec71f --- /dev/null +++ b/cluster/manifests/freeleaps-data-platform/zookeeper/vpa.yaml @@ -0,0 +1,21 @@ +apiVersion: autoscaling.k8s.io/v1 +kind: VerticalPodAutoscaler +metadata: + name: zookeeper-vpa + namespace: freeleaps-data-platform +spec: + resourcePolicy: + containerPolicies: + - containerName: '*' + controlledResources: + - cpu + - memory + maxAllowed: + cpu: 200m + memory: 512Mi + targetRef: + apiVersion: apps/v1 + kind: StatefulSet + name: zookeeper + updatePolicy: + updateMode: "Auto" \ No newline at end of file diff --git a/freeleaps/helm-pkg/freeleaps/values.prod.yaml b/freeleaps/helm-pkg/freeleaps/values.prod.yaml index 7bb326ec..a4b9e5d4 100644 --- a/freeleaps/helm-pkg/freeleaps/values.prod.yaml +++ 
b/freeleaps/helm-pkg/freeleaps/values.prod.yaml @@ -34,7 +34,7 @@ freeleaps: port: 8001 initialDelaySeconds: 5 periodSeconds: 30 - timeoutSeconds: 3 + timeoutSeconds: 60 successThreshold: 1 failureThreshold: 3 liveness: @@ -44,7 +44,7 @@ freeleaps: port: 8001 initialDelaySeconds: 5 periodSeconds: 15 - timeoutSeconds: 3 + timeoutSeconds: 60 successThreshold: 1 failureThreshold: 3 terminationGracePeriodSeconds: 30