From c2e01c63c0073c05f2b1cf7e1b65ea8227c81235 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?=E5=AD=99=E6=8C=AF=E5=AE=87?= <>
Date: Mon, 13 Jan 2025 00:16:43 +0800
Subject: [PATCH] feat(manifests): add infra service manifests

---
 .../cert-manager/.gitkeep                    |    0
 .../cert-manager/values.yaml                 | 1455 +++++
 .../godaddy-webhook/.gitkeep                 |    0
 .../godaddy-webhook/values.yaml              |   25 +
 .../ingress-nginx/.gitkeep                   |    0
 .../ingress-nginx/values.yaml                | 1224 ++++
 .../freeleaps-controls-system/namespace.yaml |    6 +
 .../kube-prometheus-stack/.gitkeep           |    0
 .../kube-prometheus-stack/values.yaml        | 5022 +++++++++++++++++
 9 files changed, 7732 insertions(+)
 delete mode 100644 cluster/manifests/freeleaps-controls-system/cert-manager/.gitkeep
 create mode 100644 cluster/manifests/freeleaps-controls-system/cert-manager/values.yaml
 delete mode 100644 cluster/manifests/freeleaps-controls-system/godaddy-webhook/.gitkeep
 create mode 100644 cluster/manifests/freeleaps-controls-system/godaddy-webhook/values.yaml
 delete mode 100644 cluster/manifests/freeleaps-controls-system/ingress-nginx/.gitkeep
 create mode 100644 cluster/manifests/freeleaps-controls-system/ingress-nginx/values.yaml
 create mode 100644 cluster/manifests/freeleaps-controls-system/namespace.yaml
 delete mode 100644 cluster/manifests/freeleaps-monitoring-system/kube-prometheus-stack/.gitkeep
 create mode 100644 cluster/manifests/freeleaps-monitoring-system/kube-prometheus-stack/values.yaml

diff --git a/cluster/manifests/freeleaps-controls-system/cert-manager/.gitkeep b/cluster/manifests/freeleaps-controls-system/cert-manager/.gitkeep
deleted file mode 100644
index e69de29b..00000000
diff --git a/cluster/manifests/freeleaps-controls-system/cert-manager/values.yaml b/cluster/manifests/freeleaps-controls-system/cert-manager/values.yaml
new file mode 100644
index 00000000..f5fbeccd
--- /dev/null
+++ b/cluster/manifests/freeleaps-controls-system/cert-manager/values.yaml
@@ -0,0 +1,1455 @@
+# +docs:section=Global

+# Default values for cert-manager.
+# This is a YAML-formatted file.
+# Declare variables to be passed into your templates.
+global:
+  # Reference to one or more secrets to be used when pulling images.
+  # For more information, see [Pull an Image from a Private Registry](https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/).
+  #
+  # For example:
+  # imagePullSecrets:
+  # - name: "image-pull-secret"
+  imagePullSecrets: []
+
+  # Labels to apply to all resources.
+  # Please note that this does not add labels to the resources created dynamically by the controllers.
+  # For those resources, you have to add the labels to the template in the cert-manager custom resource:
+  # for example, podTemplate / ingressTemplate in ACMEChallengeSolverHTTP01Ingress.
+  # For more information, see the [cert-manager documentation](https://cert-manager.io/docs/reference/api-docs/#acme.cert-manager.io/v1.ACMEChallengeSolverHTTP01Ingress).
+  # Another example is secretTemplate in CertificateSpec.
+  # For more information, see the [cert-manager documentation](https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec).
+  commonLabels: {}
+
+  # The number of old ReplicaSets to retain to allow rollback (if not set, the default Kubernetes value is 10).
+  # +docs:property
+  # revisionHistoryLimit: 1
+
+  # The optional priority class to be used for the cert-manager pods.
+  priorityClassName: ""
+
+  rbac:
+    # Create required ClusterRoles and ClusterRoleBindings for cert-manager.
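+    # (A hedged note: on clusters where RBAC is managed out-of-band, this can
+    # be overridden as:
+    #   rbac:
+    #     create: false
+    # cert-manager still needs equivalent ClusterRoles from some other source.)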
+ create: true + # Aggregate ClusterRoles to Kubernetes default user-facing roles. For more information, see [User-facing roles](https://kubernetes.io/docs/reference/access-authn-authz/rbac/#user-facing-roles) + aggregateClusterRoles: true + + podSecurityPolicy: + # Create PodSecurityPolicy for cert-manager. + # + # Note that PodSecurityPolicy was deprecated in Kubernetes 1.21 and removed in Kubernetes 1.25. + enabled: false + # Configure the PodSecurityPolicy to use AppArmor. + useAppArmor: true + + # Set the verbosity of cert-manager. A range of 0 - 6, with 6 being the most verbose. + logLevel: 2 + + leaderElection: + # Override the namespace used for the leader election lease. + namespace: "kube-system" + + # The duration that non-leader candidates will wait after observing a + # leadership renewal until attempting to acquire leadership of a led but + # unrenewed leader slot. This is effectively the maximum duration that a + # leader can be stopped before it is replaced by another candidate. + # +docs:property + # leaseDuration: 60s + + # The interval between attempts by the acting master to renew a leadership + # slot before it stops leading. This must be less than or equal to the + # lease duration. + # +docs:property + # renewDeadline: 40s + + # The duration the clients should wait between attempting acquisition and + # renewal of a leadership. + # +docs:property + # retryPeriod: 15s + +# This option is equivalent to setting crds.enabled=true and crds.keep=true. +# Deprecated: use crds.enabled and crds.keep instead. +installCRDs: false + +crds: + # This option decides if the CRDs should be installed + # as part of the Helm installation. + enabled: true + + # This option makes it so that the "helm.sh/resource-policy": keep + # annotation is added to the CRD. This will prevent Helm from uninstalling + # the CRD when the Helm release is uninstalled. + # WARNING: when the CRDs are removed, all cert-manager custom resources + # (Certificates, Issuers, ...) will be removed too by the garbage collector. + keep: true + +# +docs:section=Controller + +# The number of replicas of the cert-manager controller to run. +# +# The default is 1, but in production set this to 2 or 3 to provide high +# availability. +# +# If `replicas > 1`, consider setting `podDisruptionBudget.enabled=true`. +# +# Note that cert-manager uses leader election to ensure that there can +# only be a single instance active at a time. +replicaCount: 1 + +# Deployment update strategy for the cert-manager controller deployment. +# For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy). +# +# For example: +# strategy: +# type: RollingUpdate +# rollingUpdate: +# maxSurge: 0 +# maxUnavailable: 1 +strategy: {} + +podDisruptionBudget: + # Enable or disable the PodDisruptionBudget resource. + # + # This prevents downtime during voluntary disruptions such as during a Node upgrade. + # For example, the PodDisruptionBudget will block `kubectl drain` + # if it is used on the Node where the only remaining cert-manager + # Pod is currently running. + enabled: false + + # This configures the minimum available pods for disruptions. It can either be set to + # an integer (e.g. 1) or a percentage value (e.g. 25%). + # It cannot be used if `maxUnavailable` is set. + # +docs:property + # +docs:type=unknown + # minAvailable: 1 + + # This configures the maximum unavailable pods for disruptions. It can either be set to + # an integer (e.g. 
1) or a percentage value (e.g. 25%). + # it cannot be used if `minAvailable` is set. + # +docs:property + # +docs:type=unknown + # maxUnavailable: 1 + +# A comma-separated list of feature gates that should be enabled on the +# controller pod. +featureGates: "" + +# The maximum number of challenges that can be scheduled as 'processing' at once. +maxConcurrentChallenges: 60 + +image: + # The container registry to pull the manager image from. + # +docs:property + # registry: quay.io + + # The container image for the cert-manager controller. + # +docs:property + repository: quay.io/jetstack/cert-manager-controller + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion is used. + # +docs:property + # tag: vX.Y.Z + + # Setting a digest will override any tag. + # +docs:property + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + # Kubernetes imagePullPolicy on Deployment. + pullPolicy: IfNotPresent + +# Override the namespace used to store DNS provider credentials etc. for ClusterIssuer +# resources. By default, the same namespace as cert-manager is deployed within is +# used. This namespace will not be automatically created by the Helm chart. +clusterResourceNamespace: "" + +# This namespace allows you to define where the services are installed into. +# If not set then they use the namespace of the release. +# This is helpful when installing cert manager as a chart dependency (sub chart). +namespace: "freeleaps-controls-system" + +# Override the "cert-manager.fullname" value. This value is used as part of +# most of the names of the resources created by this Helm chart. +# +docs:property +# fullnameOverride: "my-cert-manager" + +# Override the "cert-manager.name" value, which is used to annotate some of +# the resources that are created by this Chart (using "app.kubernetes.io/name"). +# NOTE: There are some inconsistencies in the Helm chart when it comes to +# these annotations (some resources use eg. "cainjector.name" which resolves +# to the value "cainjector"). +# +docs:property +# nameOverride: "my-cert-manager" + +serviceAccount: + # Specifies whether a service account should be created. + create: true + + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template. + # +docs:property + # name: "" + + # Optional additional annotations to add to the controller's Service Account. + # +docs:property + # annotations: {} + + # Optional additional labels to add to the controller's Service Account. + # +docs:property + # labels: {} + + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + +# Automounting API credentials for a particular pod. +# +docs:property +# automountServiceAccountToken: true + +# When this flag is enabled, secrets will be automatically removed when the certificate resource is deleted. +enableCertificateOwnerRef: false + +# This property is used to configure options for the controller pod. +# This allows setting options that would usually be provided using flags. +# +# If `apiVersion` and `kind` are unspecified they default to the current latest +# version (currently `controller.config.cert-manager.io/v1alpha1`). You can pin +# the version by specifying the `apiVersion` yourself. 
+#
+# For example:
+# config:
+#   apiVersion: controller.config.cert-manager.io/v1alpha1
+#   kind: ControllerConfiguration
+#   logging:
+#     verbosity: 2
+#     format: text
+#   leaderElectionConfig:
+#     namespace: kube-system
+#   kubernetesAPIQPS: 9000
+#   kubernetesAPIBurst: 9000
+#   numberOfConcurrentWorkers: 200
+#   featureGates:
+#     AdditionalCertificateOutputFormats: true
+#     DisallowInsecureCSRUsageDefinition: true
+#     ExperimentalCertificateSigningRequestControllers: true
+#     ExperimentalGatewayAPISupport: true
+#     LiteralCertificateSubject: true
+#     SecretsFilteredCaching: true
+#     ServerSideApply: true
+#     StableCertificateRequestName: true
+#     UseCertificateRequestBasicConstraints: true
+#     ValidateCAA: true
+#   # Configure the metrics server for TLS
+#   # See https://cert-manager.io/docs/devops-tips/prometheus-metrics/#tls
+#   metricsTLSConfig:
+#     dynamic:
+#       secretNamespace: "cert-manager"
+#       secretName: "cert-manager-metrics-ca"
+#       dnsNames:
+#       - cert-manager-metrics
+config: {}
+
+# Setting Nameservers for DNS01 Self Check.
+# For more information, see the [cert-manager documentation](https://cert-manager.io/docs/configuration/acme/dns01/#setting-nameservers-for-dns01-self-check).
+
+# A comma-separated string with the host and port of the recursive nameservers cert-manager should query.
+dns01RecursiveNameservers: ""
+
+# Forces cert-manager to use only the recursive nameservers for verification.
+# Enabling this option could cause the DNS01 self check to take longer owing to caching performed by the recursive nameservers.
+dns01RecursiveNameserversOnly: false
+
+# Option to disable cert-manager's built-in auto-approver. The auto-approver
+# approves all CertificateRequests that reference issuers matching the 'approveSignerNames'
+# option. This 'disableAutoApproval' option is useful when you want to make all approval decisions
+# using a different approver (like approver-policy - https://github.com/cert-manager/approver-policy).
+disableAutoApproval: false
+
+# List of signer names that cert-manager will approve by default. CertificateRequests
+# referencing these signer names will be auto-approved by cert-manager. Defaults to just
+# approving the cert-manager.io Issuer and ClusterIssuer issuers. When set to an empty
+# array, ALL issuers will be auto-approved by cert-manager. To disable the auto-approval,
+# because, e.g., you are using approver-policy, you can enable 'disableAutoApproval'.
+# ref: https://cert-manager.io/docs/concepts/certificaterequest/#approval
+# +docs:property
+approveSignerNames:
+- issuers.cert-manager.io/*
+- clusterissuers.cert-manager.io/*
+
+# Additional command line flags to pass to cert-manager controller binary.
+# To see all available flags run `docker run quay.io/jetstack/cert-manager-controller:<version> --help`.
+#
+# Use this flag to enable or disable arbitrary controllers. For example, to disable the CertificateRequests approver.
+#
+# For example:
+# extraArgs:
+#   - --controllers=*,-certificaterequests-approver
+extraArgs: []
+
+# Additional environment variables to pass to cert-manager controller binary.
+# For example:
+# extraEnv:
+# - name: SOME_VAR
+#   value: 'some value'
+extraEnv: []
+
+# Resources to provide to the cert-manager controller pod.
+#
+# For example:
+# requests:
+#   cpu: 10m
+#   memory: 32Mi
+#
+# For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/).
+resources: {}
+
+# Pod Security Context.
+# For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). +# +docs:property +securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + +# Container Security Context to be set on the controller component container. +# For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). +# +docs:property +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + +# Additional volumes to add to the cert-manager controller pod. +volumes: [] + +# Additional volume mounts to add to the cert-manager controller container. +volumeMounts: [] + +# Optional additional annotations to add to the controller Deployment. +# +docs:property +# deploymentAnnotations: {} + +# Optional additional annotations to add to the controller Pods. +# +docs:property +# podAnnotations: {} + +# Optional additional labels to add to the controller Pods. +podLabels: {} + +# Optional annotations to add to the controller Service. +# +docs:property +# serviceAnnotations: {} + +# Optional additional labels to add to the controller Service. +# +docs:property +# serviceLabels: {} + +# Optionally set the IP family policy for the controller Service to configure dual-stack; see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services). +# +docs:property +# serviceIPFamilyPolicy: "" + +# Optionally set the IP families for the controller Service that should be supported, in the order in which they should be applied to ClusterIP. Can be IPv4 and/or IPv6. +# +docs:property +# serviceIPFamilies: [] + +# Optional DNS settings. These are useful if you have a public and private DNS zone for +# the same domain on Route 53. The following is an example of ensuring +# cert-manager can access an ingress or DNS TXT records at all times. +# Note that this requires Kubernetes 1.10 or `CustomPodDNS` feature gate enabled for +# the cluster to work. + +# Pod DNS policy. +# For more information, see [Pod's DNS Policy](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-s-dns-policy). +# +docs:property +# podDnsPolicy: "None" + +# Pod DNS configuration. The podDnsConfig field is optional and can work with any podDnsPolicy +# settings. However, when a Pod's dnsPolicy is set to "None", the dnsConfig field has to be specified. +# For more information, see [Pod's DNS Config](https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config). +# +docs:property +# podDnsConfig: +# nameservers: +# - "1.1.1.1" +# - "8.8.8.8" + +# Optional hostAliases for cert-manager-controller pods. May be useful when performing ACME DNS-01 self checks. +hostAliases: [] +# - ip: 127.0.0.1 +# hostnames: +# - foo.local +# - bar.local +# - ip: 10.1.2.3 +# hostnames: +# - foo.remote +# - bar.remote + +# The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with +# matching labels. +# For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). +# +# This default ensures that Pods are only scheduled to Linux nodes. +# It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. 
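+# (A hedged sketch of extending that default to also pin cert-manager onto a
+# dedicated pool; the pool label below is an assumption for illustration:
+#   nodeSelector:
+#     kubernetes.io/os: linux
+#     node-role.kubernetes.io/infra: ""
+# )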
+# +docs:property +nodeSelector: + kubernetes.io/os: linux + +# +docs:ignore +ingressShim: {} + + # Optional default issuer to use for ingress resources. + # +docs:property=ingressShim.defaultIssuerName + # defaultIssuerName: "" + + # Optional default issuer kind to use for ingress resources. + # +docs:property=ingressShim.defaultIssuerKind + # defaultIssuerKind: "" + + # Optional default issuer group to use for ingress resources. + # +docs:property=ingressShim.defaultIssuerGroup + # defaultIssuerGroup: "" + +# Use these variables to configure the HTTP_PROXY environment variables. + +# Configures the HTTP_PROXY environment variable where a HTTP proxy is required. +# +docs:property +# http_proxy: "http://proxy:8080" + +# Configures the HTTPS_PROXY environment variable where a HTTP proxy is required. +# +docs:property +# https_proxy: "https://proxy:8080" + +# Configures the NO_PROXY environment variable where a HTTP proxy is required, +# but certain domains should be excluded. +# +docs:property +# no_proxy: 127.0.0.1,localhost + + +# A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). +# +# For example: +# affinity: +# nodeAffinity: +# requiredDuringSchedulingIgnoredDuringExecution: +# nodeSelectorTerms: +# - matchExpressions: +# - key: foo.bar.com/role +# operator: In +# values: +# - master +affinity: {} + +# A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). +# +# For example: +# tolerations: +# - key: foo.bar.com/role +# operator: Equal +# value: master +# effect: NoSchedule +tolerations: [] + +# A list of Kubernetes TopologySpreadConstraints, if required. For more information, see [Topology spread constraint v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core +# +# For example: +# topologySpreadConstraints: +# - maxSkew: 2 +# topologyKey: topology.kubernetes.io/zone +# whenUnsatisfiable: ScheduleAnyway +# labelSelector: +# matchLabels: +# app.kubernetes.io/instance: cert-manager +# app.kubernetes.io/component: controller +topologySpreadConstraints: [] + +# LivenessProbe settings for the controller container of the controller Pod. +# +# This is enabled by default, in order to enable the clock-skew liveness probe that +# restarts the controller in case of a skew between the system clock and the monotonic clock. +# LivenessProbe durations and thresholds are based on those used for the Kubernetes +# controller-manager. For more information see the following on the +# [Kubernetes GitHub repository](https://github.com/kubernetes/kubernetes/blob/806b30170c61a38fedd54cc9ede4cd6275a1ad3b/cmd/kubeadm/app/util/staticpod/utils.go#L241-L245) +# +docs:property +livenessProbe: + enabled: true + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 15 + successThreshold: 1 + failureThreshold: 8 + +# enableServiceLinks indicates whether information about services should be +# injected into the pod's environment variables, matching the syntax of Docker +# links. +enableServiceLinks: false + +# +docs:section=Prometheus + +prometheus: + # Enable Prometheus monitoring for the cert-manager controller and webhook. + # If you use the Prometheus Operator, set prometheus.podmonitor.enabled or + # prometheus.servicemonitor.enabled, to create a PodMonitor or a + # ServiceMonitor resource. 
+ # Otherwise, 'prometheus.io' annotations are added to the cert-manager and + # cert-manager-webhook Deployments. + # Note that you can not enable both PodMonitor and ServiceMonitor as they are + # mutually exclusive. Enabling both will result in an error. + enabled: false + + servicemonitor: + # Create a ServiceMonitor to add cert-manager to Prometheus. + enabled: false + + # The namespace that the service monitor should live in, defaults + # to the cert-manager namespace. + # +docs:property + # namespace: cert-manager + + # Specifies the `prometheus` label on the created ServiceMonitor. This is + # used when different Prometheus instances have label selectors matching + # different ServiceMonitors. + prometheusInstance: default + + # The target port to set on the ServiceMonitor. This must match the port that the + # cert-manager controller is listening on for metrics. + targetPort: 9402 + + # The path to scrape for metrics. + path: /metrics + + # The interval to scrape metrics. + interval: 60s + + # The timeout before a metrics scrape fails. + scrapeTimeout: 30s + + # Additional labels to add to the ServiceMonitor. + labels: {} + + # Additional annotations to add to the ServiceMonitor. + annotations: {} + + # Keep labels from scraped data, overriding server-side labels. + honorLabels: false + + # EndpointAdditionalProperties allows setting additional properties on the + # endpoint such as relabelings, metricRelabelings etc. + # + # For example: + # endpointAdditionalProperties: + # relabelings: + # - action: replace + # sourceLabels: + # - __meta_kubernetes_pod_node_name + # targetLabel: instance + # + # +docs:property + endpointAdditionalProperties: {} + + # Note that you can not enable both PodMonitor and ServiceMonitor as they are mutually exclusive. Enabling both will result in an error. + podmonitor: + # Create a PodMonitor to add cert-manager to Prometheus. + enabled: false + + # The namespace that the pod monitor should live in, defaults + # to the cert-manager namespace. + # +docs:property + # namespace: cert-manager + + # Specifies the `prometheus` label on the created PodMonitor. This is + # used when different Prometheus instances have label selectors matching + # different PodMonitors. + prometheusInstance: default + + # The path to scrape for metrics. + path: /metrics + + # The interval to scrape metrics. + interval: 60s + + # The timeout before a metrics scrape fails. + scrapeTimeout: 30s + + # Additional labels to add to the PodMonitor. + labels: {} + + # Additional annotations to add to the PodMonitor. + annotations: {} + + # Keep labels from scraped data, overriding server-side labels. + honorLabels: false + + # EndpointAdditionalProperties allows setting additional properties on the + # endpoint such as relabelings, metricRelabelings etc. + # + # For example: + # endpointAdditionalProperties: + # relabelings: + # - action: replace + # sourceLabels: + # - __meta_kubernetes_pod_node_name + # targetLabel: instance + # # Configure the PodMonitor for TLS connections + # # See https://cert-manager.io/docs/devops-tips/prometheus-metrics/#tls + # scheme: https + # tlsConfig: + # serverName: cert-manager-metrics + # ca: + # secret: + # name: cert-manager-metrics-ca + # key: "tls.crt" + # + # +docs:property + endpointAdditionalProperties: {} + +# +docs:section=Webhook + +webhook: + # Number of replicas of the cert-manager webhook to run. + # + # The default is 1, but in production set this to 2 or 3 to provide high + # availability. 
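+  # (A hedged HA override, shown for illustration only; this file keeps the
+  # single-replica default:
+  #   replicaCount: 2
+  # The webhook is stateless, so extra replicas serve concurrently without
+  # leader election.)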
+ # + # If `replicas > 1`, consider setting `webhook.podDisruptionBudget.enabled=true`. + replicaCount: 1 + + # The number of seconds the API server should wait for the webhook to respond before treating the call as a failure. + # The value must be between 1 and 30 seconds. For more information, see + # [Validating webhook configuration v1](https://kubernetes.io/docs/reference/kubernetes-api/extend-resources/validating-webhook-configuration-v1/). + # + # The default is set to the maximum value of 30 seconds as + # users sometimes report that the connection between the K8S API server and + # the cert-manager webhook server times out. + # If *this* timeout is reached, the error message will be "context deadline exceeded", + # which doesn't help the user diagnose what phase of the HTTPS connection timed out. + # For example, it could be during DNS resolution, TCP connection, TLS + # negotiation, HTTP negotiation, or slow HTTP response from the webhook + # server. + # By setting this timeout to its maximum value the underlying timeout error + # message has more chance of being returned to the end user. + timeoutSeconds: 30 + + # This is used to configure options for the webhook pod. + # This allows setting options that would usually be provided using flags. + # + # If `apiVersion` and `kind` are unspecified they default to the current latest + # version (currently `webhook.config.cert-manager.io/v1alpha1`). You can pin + # the version by specifying the `apiVersion` yourself. + # + # For example: + # apiVersion: webhook.config.cert-manager.io/v1alpha1 + # kind: WebhookConfiguration + # # The port that the webhook listens on for requests. + # # In GKE private clusters, by default Kubernetes apiservers are allowed to + # # talk to the cluster nodes only on 443 and 10250. Configuring + # # securePort: 10250 therefore will work out-of-the-box without needing to add firewall + # # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers < 1000. + # # This should be uncommented and set as a default by the chart once + # # the apiVersion of WebhookConfiguration graduates beyond v1alpha1. + # securePort: 10250 + # # Configure the metrics server for TLS + # # See https://cert-manager.io/docs/devops-tips/prometheus-metrics/#tls + # metricsTLSConfig: + # dynamic: + # secretNamespace: "cert-manager" + # secretName: "cert-manager-metrics-ca" + # dnsNames: + # - cert-manager-metrics + config: {} + + # The update strategy for the cert-manager webhook deployment. + # For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy) + # + # For example: + # strategy: + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + strategy: {} + + # Pod Security Context to be set on the webhook component Pod. + # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). + # +docs:property + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + # Container Security Context to be set on the webhook component container. + # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). 
+ # +docs:property + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + + podDisruptionBudget: + # Enable or disable the PodDisruptionBudget resource. + # + # This prevents downtime during voluntary disruptions such as during a Node upgrade. + # For example, the PodDisruptionBudget will block `kubectl drain` + # if it is used on the Node where the only remaining cert-manager + # Pod is currently running. + enabled: false + + # This property configures the minimum available pods for disruptions. Can either be set to + # an integer (e.g. 1) or a percentage value (e.g. 25%). + # It cannot be used if `maxUnavailable` is set. + # +docs:property + # +docs:type=unknown + # minAvailable: 1 + + # This property configures the maximum unavailable pods for disruptions. Can either be set to + # an integer (e.g. 1) or a percentage value (e.g. 25%). + # It cannot be used if `minAvailable` is set. + # +docs:property + # +docs:type=unknown + # maxUnavailable: 1 + + # Optional additional annotations to add to the webhook Deployment. + # +docs:property + # deploymentAnnotations: {} + + # Optional additional annotations to add to the webhook Pods. + # +docs:property + # podAnnotations: {} + + # Optional additional annotations to add to the webhook Service. + # +docs:property + # serviceAnnotations: {} + + # Optional additional annotations to add to the webhook MutatingWebhookConfiguration. + # +docs:property + # mutatingWebhookConfigurationAnnotations: {} + + # Optional additional annotations to add to the webhook ValidatingWebhookConfiguration. + # +docs:property + # validatingWebhookConfigurationAnnotations: {} + + validatingWebhookConfiguration: + # Configure spec.namespaceSelector for validating webhooks. + # +docs:property + namespaceSelector: + matchExpressions: + - key: "cert-manager.io/disable-validation" + operator: "NotIn" + values: + - "true" + + mutatingWebhookConfiguration: + # Configure spec.namespaceSelector for mutating webhooks. + # +docs:property + namespaceSelector: {} + # matchLabels: + # key: value + # matchExpressions: + # - key: kubernetes.io/metadata.name + # operator: NotIn + # values: + # - kube-system + + + # Additional command line flags to pass to cert-manager webhook binary. + # To see all available flags run `docker run quay.io/jetstack/cert-manager-webhook: --help`. + extraArgs: [] + # Path to a file containing a WebhookConfiguration object used to configure the webhook. + # - --config= + + # Additional environment variables to pass to cert-manager webhook binary. + # For example: + # extraEnv: + # - name: SOME_VAR + # value: 'some value' + extraEnv: [] + + # Comma separated list of feature gates that should be enabled on the + # webhook pod. + featureGates: "" + + # Resources to provide to the cert-manager webhook pod. + # + # For example: + # requests: + # cpu: 10m + # memory: 32Mi + # + # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + resources: {} + + # Liveness probe values. + # For more information, see [Container probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). + # + # +docs:property + livenessProbe: + failureThreshold: 3 + initialDelaySeconds: 60 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + # Readiness probe values. 
+ # For more information, see [Container probes](https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes). + # + # +docs:property + readinessProbe: + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 5 + successThreshold: 1 + timeoutSeconds: 1 + + # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with + # matching labels. + # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). + # + # This default ensures that Pods are only scheduled to Linux nodes. + # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. + # +docs:property + nodeSelector: + kubernetes.io/os: linux + + # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). + # + # For example: + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: foo.bar.com/role + # operator: In + # values: + # - master + affinity: {} + + # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). + # + # For example: + # tolerations: + # - key: foo.bar.com/role + # operator: Equal + # value: master + # effect: NoSchedule + tolerations: [] + + # A list of Kubernetes TopologySpreadConstraints, if required. For more information, see [Topology spread constraint v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core). + # + # For example: + # topologySpreadConstraints: + # - maxSkew: 2 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: ScheduleAnyway + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: cert-manager + # app.kubernetes.io/component: controller + topologySpreadConstraints: [] + + # Optional additional labels to add to the Webhook Pods. + podLabels: {} + + # Optional additional labels to add to the Webhook Service. + serviceLabels: {} + + # Optionally set the IP family policy for the controller Service to configure dual-stack; see [Configure dual-stack](https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services). + serviceIPFamilyPolicy: "" + + # Optionally set the IP families for the controller Service that should be supported, in the order in which they should be applied to ClusterIP. Can be IPv4 and/or IPv6. + serviceIPFamilies: [] + + image: + # The container registry to pull the webhook image from. + # +docs:property + # registry: quay.io + + # The container image for the cert-manager webhook + # +docs:property + repository: quay.io/jetstack/cert-manager-webhook + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # +docs:property + # tag: vX.Y.Z + + # Setting a digest will override any tag + # +docs:property + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + # Kubernetes imagePullPolicy on Deployment. + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created. + create: true + + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template. 
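+    # (Override example; the name below is an assumption for illustration:
+    #   name: cert-manager-webhook-sa
+    # )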
+    # +docs:property
+    # name: ""
+
+    # Optional additional annotations to add to the webhook's Service Account.
+    # +docs:property
+    # annotations: {}
+
+    # Optional additional labels to add to the webhook's Service Account.
+    # +docs:property
+    # labels: {}
+
+    # Automount API credentials for a Service Account.
+    automountServiceAccountToken: true
+
+  # Automounting API credentials for a particular pod.
+  # +docs:property
+  # automountServiceAccountToken: true
+
+  # The port that the webhook listens on for requests.
+  # In GKE private clusters, by default Kubernetes apiservers are allowed to
+  # talk to the cluster nodes only on 443 and 10250. Configuring
+  # securePort: 10250 therefore works out-of-the-box without needing to add firewall
+  # rules or requiring NET_BIND_SERVICE capabilities to bind port numbers <1000.
+  securePort: 10250
+
+  # Specifies if the webhook should be started in hostNetwork mode.
+  #
+  # Required for use in some managed Kubernetes clusters (such as AWS EKS) with custom
+  # CNI (such as Calico), because the control plane managed by AWS cannot communicate
+  # with the pods' IP CIDR, so admission webhooks do not work.
+  #
+  # Since the default port for the webhook conflicts with kubelet on the host
+  # network, `webhook.securePort` should be changed to an available port if
+  # running in hostNetwork mode.
+  hostNetwork: false
+
+  # Specifies how the service should be handled. Useful if you want to expose the
+  # webhook outside of the cluster. In some cases, the control plane cannot
+  # reach internal services.
+  serviceType: ClusterIP
+
+  # Specify the load balancer IP for the created service.
+  # +docs:property
+  # loadBalancerIP: "10.10.10.10"
+
+  # Overrides the mutating webhook and validating webhook so they reach the webhook
+  # service using the `url` field instead of a service.
+  url: {}
+  # host:
+
+  # Enables default network policies for webhooks.
+  networkPolicy:
+    # Create network policies for the webhooks.
+    enabled: false
+
+    # Ingress rule for the webhook network policy. By default, it allows all
+    # inbound traffic.
+    # +docs:property
+    ingress:
+    - from:
+      - ipBlock:
+          cidr: 0.0.0.0/0
+
+    # Egress rule for the webhook network policy. By default, it allows all
+    # outbound traffic to ports 80 and 443, as well as DNS ports.
+    # +docs:property
+    egress:
+    - ports:
+      - port: 80
+        protocol: TCP
+      - port: 443
+        protocol: TCP
+      - port: 53
+        protocol: TCP
+      - port: 53
+        protocol: UDP
+      # On OpenShift and OKD, the Kubernetes API server listens on
+      # port 6443.
+      - port: 6443
+        protocol: TCP
+      to:
+      - ipBlock:
+          cidr: 0.0.0.0/0
+
+  # Additional volumes to add to the cert-manager controller pod.
+  volumes: []
+
+  # Additional volume mounts to add to the cert-manager controller container.
+  volumeMounts: []
+
+  # enableServiceLinks indicates whether information about services should be
+  # injected into the pod's environment variables, matching the syntax of Docker
+  # links.
+  enableServiceLinks: false
+
+# +docs:section=CA Injector
+
+cainjector:
+  # Create the CA Injector deployment
+  enabled: true
+
+  # The number of replicas of the cert-manager cainjector to run.
+  #
+  # The default is 1, but in production set this to 2 or 3 to provide high
+  # availability.
+  #
+  # If `replicas > 1`, consider setting `cainjector.podDisruptionBudget.enabled=true`.
+  #
+  # Note that cert-manager uses leader election to ensure that there can
+  # only be a single instance active at a time.
+  replicaCount: 1
+
+  # This is used to configure options for the cainjector pod.
+ # It allows setting options that are usually provided via flags. + # + # If `apiVersion` and `kind` are unspecified they default to the current latest + # version (currently `cainjector.config.cert-manager.io/v1alpha1`). You can pin + # the version by specifying the `apiVersion` yourself. + # + # For example: + # apiVersion: cainjector.config.cert-manager.io/v1alpha1 + # kind: CAInjectorConfiguration + # logging: + # verbosity: 2 + # format: text + # leaderElectionConfig: + # namespace: kube-system + # # Configure the metrics server for TLS + # # See https://cert-manager.io/docs/devops-tips/prometheus-metrics/#tls + # metricsTLSConfig: + # dynamic: + # secretNamespace: "cert-manager" + # secretName: "cert-manager-metrics-ca" + # dnsNames: + # - cert-manager-metrics + config: {} + + # Deployment update strategy for the cert-manager cainjector deployment. + # For more information, see the [Kubernetes documentation](https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy). + # + # For example: + # strategy: + # type: RollingUpdate + # rollingUpdate: + # maxSurge: 0 + # maxUnavailable: 1 + strategy: {} + + # Pod Security Context to be set on the cainjector component Pod + # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). + # +docs:property + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + # Container Security Context to be set on the cainjector component container + # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). + # +docs:property + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + + podDisruptionBudget: + # Enable or disable the PodDisruptionBudget resource. + # + # This prevents downtime during voluntary disruptions such as during a Node upgrade. + # For example, the PodDisruptionBudget will block `kubectl drain` + # if it is used on the Node where the only remaining cert-manager + # Pod is currently running. + enabled: false + + # `minAvailable` configures the minimum available pods for disruptions. It can either be set to + # an integer (e.g. 1) or a percentage value (e.g. 25%). + # Cannot be used if `maxUnavailable` is set. + # +docs:property + # +docs:type=unknown + # minAvailable: 1 + + # `maxUnavailable` configures the maximum unavailable pods for disruptions. It can either be set to + # an integer (e.g. 1) or a percentage value (e.g. 25%). + # Cannot be used if `minAvailable` is set. + # +docs:property + # +docs:type=unknown + # maxUnavailable: 1 + + # Optional additional annotations to add to the cainjector Deployment. + # +docs:property + # deploymentAnnotations: {} + + # Optional additional annotations to add to the cainjector Pods. + # +docs:property + # podAnnotations: {} + + # Optional additional annotations to add to the cainjector metrics Service. + # +docs:property + # serviceAnnotations: {} + + # Additional command line flags to pass to cert-manager cainjector binary. + # To see all available flags run `docker run quay.io/jetstack/cert-manager-cainjector: --help`. + extraArgs: [] + # Enable profiling for cainjector. + # - --enable-profiling=true + + # Additional environment variables to pass to cert-manager cainjector binary. 
+ # For example: + # extraEnv: + # - name: SOME_VAR + # value: 'some value' + extraEnv: [] + + # Comma separated list of feature gates that should be enabled on the + # cainjector pod. + featureGates: "" + + # Resources to provide to the cert-manager cainjector pod. + # + # For example: + # requests: + # cpu: 10m + # memory: 32Mi + # + # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + resources: {} + + + # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with + # matching labels. + # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). + # + # This default ensures that Pods are only scheduled to Linux nodes. + # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. + # +docs:property + nodeSelector: + kubernetes.io/os: linux + + # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). + # + # For example: + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: foo.bar.com/role + # operator: In + # values: + # - master + affinity: {} + + # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). + # + # For example: + # tolerations: + # - key: foo.bar.com/role + # operator: Equal + # value: master + # effect: NoSchedule + tolerations: [] + + # A list of Kubernetes TopologySpreadConstraints, if required. For more information, see [Topology spread constraint v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#topologyspreadconstraint-v1-core). + # + # For example: + # topologySpreadConstraints: + # - maxSkew: 2 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: ScheduleAnyway + # labelSelector: + # matchLabels: + # app.kubernetes.io/instance: cert-manager + # app.kubernetes.io/component: controller + topologySpreadConstraints: [] + + # Optional additional labels to add to the CA Injector Pods. + podLabels: {} + + # Optional additional labels to add to the CA Injector metrics Service. + serviceLabels: {} + + image: + # The container registry to pull the cainjector image from. + # +docs:property + # registry: quay.io + + # The container image for the cert-manager cainjector + # +docs:property + repository: quay.io/jetstack/cert-manager-cainjector + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion will be used. + # +docs:property + # tag: vX.Y.Z + + # Setting a digest will override any tag. + # +docs:property + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + # Kubernetes imagePullPolicy on Deployment. + pullPolicy: IfNotPresent + + serviceAccount: + # Specifies whether a service account should be created. + create: true + + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + # +docs:property + # name: "" + + # Optional additional annotations to add to the cainjector's Service Account. + # +docs:property + # annotations: {} + + # Optional additional labels to add to the cainjector's Service Account. 
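+    # (Illustrative labels; the key and value are assumptions:
+    #   labels:
+    #     app.kubernetes.io/part-of: freeleaps-controls-system
+    # )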
+ # +docs:property + # labels: {} + + # Automount API credentials for a Service Account. + automountServiceAccountToken: true + + # Automounting API credentials for a particular pod. + # +docs:property + # automountServiceAccountToken: true + + # Additional volumes to add to the cert-manager controller pod. + volumes: [] + + # Additional volume mounts to add to the cert-manager controller container. + volumeMounts: [] + + # enableServiceLinks indicates whether information about services should be + # injected into the pod's environment variables, matching the syntax of Docker + # links. + enableServiceLinks: false + +# +docs:section=ACME Solver + +acmesolver: + image: + # The container registry to pull the acmesolver image from. + # +docs:property + # registry: quay.io + + # The container image for the cert-manager acmesolver. + # +docs:property + repository: quay.io/jetstack/cert-manager-acmesolver + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion is used. + # +docs:property + # tag: vX.Y.Z + + # Setting a digest will override any tag. + # +docs:property + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + # Kubernetes imagePullPolicy on Deployment. + pullPolicy: IfNotPresent + +# +docs:section=Startup API Check +# This startupapicheck is a Helm post-install hook that waits for the webhook +# endpoints to become available. +# The check is implemented using a Kubernetes Job - if you are injecting mesh +# sidecar proxies into cert-manager pods, ensure that they +# are not injected into this Job's pod. Otherwise, the installation may time out +# owing to the Job never being completed because the sidecar proxy does not exit. +# For more information, see [this note](https://github.com/cert-manager/cert-manager/pull/4414). + +startupapicheck: + # Enables the startup api check. + enabled: true + + # Pod Security Context to be set on the startupapicheck component Pod. + # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). + # +docs:property + securityContext: + runAsNonRoot: true + seccompProfile: + type: RuntimeDefault + + # Container Security Context to be set on the controller component container. + # For more information, see [Configure a Security Context for a Pod or Container](https://kubernetes.io/docs/tasks/configure-pod-container/security-context/). + # +docs:property + containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + + # Timeout for 'kubectl check api' command. + timeout: 1m + + # Job backoffLimit + backoffLimit: 4 + + # Optional additional annotations to add to the startupapicheck Job. + # +docs:property + jobAnnotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "1" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Optional additional annotations to add to the startupapicheck Pods. + # +docs:property + # podAnnotations: {} + + # Additional command line flags to pass to startupapicheck binary. + # To see all available flags run `docker run quay.io/jetstack/cert-manager-startupapicheck: --help`. + # + # Verbose logging is enabled by default so that if startupapicheck fails, you + # can know what exactly caused the failure. Verbose logs include details of + # the webhook URL, IP address and TCP connect errors for example. 
+ # +docs:property + extraArgs: + - -v + + # Additional environment variables to pass to cert-manager startupapicheck binary. + # For example: + # extraEnv: + # - name: SOME_VAR + # value: 'some value' + extraEnv: [] + + # Resources to provide to the cert-manager controller pod. + # + # For example: + # requests: + # cpu: 10m + # memory: 32Mi + # + # For more information, see [Resource Management for Pods and Containers](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + resources: {} + + + # The nodeSelector on Pods tells Kubernetes to schedule Pods on the nodes with + # matching labels. + # For more information, see [Assigning Pods to Nodes](https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/). + # + # This default ensures that Pods are only scheduled to Linux nodes. + # It prevents Pods being scheduled to Windows nodes in a mixed OS cluster. + # +docs:property + nodeSelector: + kubernetes.io/os: linux + + # A Kubernetes Affinity, if required. For more information, see [Affinity v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#affinity-v1-core). + # For example: + # affinity: + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: foo.bar.com/role + # operator: In + # values: + # - master + affinity: {} + + # A list of Kubernetes Tolerations, if required. For more information, see [Toleration v1 core](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#toleration-v1-core). + # + # For example: + # tolerations: + # - key: foo.bar.com/role + # operator: Equal + # value: master + # effect: NoSchedule + tolerations: [] + + # Optional additional labels to add to the startupapicheck Pods. + podLabels: {} + + image: + # The container registry to pull the startupapicheck image from. + # +docs:property + # registry: quay.io + + # The container image for the cert-manager startupapicheck. + # +docs:property + repository: quay.io/jetstack/cert-manager-startupapicheck + + # Override the image tag to deploy by setting this variable. + # If no value is set, the chart's appVersion is used. + # +docs:property + # tag: vX.Y.Z + + # Setting a digest will override any tag. + # +docs:property + # digest: sha256:0e072dddd1f7f8fc8909a2ca6f65e76c5f0d2fcfb8be47935ae3457e8bbceb20 + + # Kubernetes imagePullPolicy on Deployment. + pullPolicy: IfNotPresent + + rbac: + # annotations for the startup API Check job RBAC and PSP resources. + # +docs:property + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Automounting API credentials for a particular pod. + # +docs:property + # automountServiceAccountToken: true + + serviceAccount: + # Specifies whether a service account should be created. + create: true + + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template. + # +docs:property + # name: "" + + # Optional additional annotations to add to the Job's Service Account. + # +docs:property + annotations: + helm.sh/hook: post-install + helm.sh/hook-weight: "-5" + helm.sh/hook-delete-policy: before-hook-creation,hook-succeeded + + # Automount API credentials for a Service Account. + # +docs:property + automountServiceAccountToken: true + + # Optional additional labels to add to the startupapicheck's Service Account. 
+ # +docs:property + # labels: {} + + # Additional volumes to add to the cert-manager controller pod. + volumes: [] + + # Additional volume mounts to add to the cert-manager controller container. + volumeMounts: [] + + # enableServiceLinks indicates whether information about services should be + # injected into pod's environment variables, matching the syntax of Docker + # links. + enableServiceLinks: false + +# Create dynamic manifests via values. +# +# For example: +# extraObjects: +# - | +# apiVersion: v1 +# kind: ConfigMap +# metadata: +# name: '{{ template "cert-manager.fullname" . }}-extra-configmap' +extraObjects: [] + +# Field used by our release pipeline to produce the static manifests. +# The field defaults to "helm" but is set to "static" when we render +# the static YAML manifests. +# +docs:hidden +creator: "helm" + +# Field that can be used as a condition when cert-manager is a dependency. +# This definition is only here as a placeholder such that it is included in +# the json schema. +# See https://helm.sh/docs/chart_best_practices/dependencies/#conditions-and-tags +# for more info. +# +docs:hidden +enabled: true diff --git a/cluster/manifests/freeleaps-controls-system/godaddy-webhook/.gitkeep b/cluster/manifests/freeleaps-controls-system/godaddy-webhook/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/cluster/manifests/freeleaps-controls-system/godaddy-webhook/values.yaml b/cluster/manifests/freeleaps-controls-system/godaddy-webhook/values.yaml new file mode 100644 index 00000000..2f6a1244 --- /dev/null +++ b/cluster/manifests/freeleaps-controls-system/godaddy-webhook/values.yaml @@ -0,0 +1,25 @@ +replicaCount: 1 +image: + repository: quay.io/snowdrop/cert-manager-webhook-godaddy + tag: 0.5.0 + pullPolicy: IfNotPresent +pod: + securePort: +logging: + level: info # Log level (trace, debug, info, warn, error, fatal, panic) + format: color # Log format (text, color, json) + timestamp: true # Timestamp in log output: true/false +groupName: acme.mathmast.com +certManager: + namespace: freeleaps-controls-system + serviceAccountName: cert-manager +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" +service: + type: ClusterIP + port: 443 +resources: {} +nodeSelector: {} +tolerations: [] +affinity: {} \ No newline at end of file diff --git a/cluster/manifests/freeleaps-controls-system/ingress-nginx/.gitkeep b/cluster/manifests/freeleaps-controls-system/ingress-nginx/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/cluster/manifests/freeleaps-controls-system/ingress-nginx/values.yaml b/cluster/manifests/freeleaps-controls-system/ingress-nginx/values.yaml new file mode 100644 index 00000000..b10a44ac --- /dev/null +++ b/cluster/manifests/freeleaps-controls-system/ingress-nginx/values.yaml @@ -0,0 +1,1224 @@ +## nginx configuration +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/index.md +## + +global: + image: + # -- Registry host to pull images from. + registry: registry.k8s.io +## Overrides for generated resource names +# See templates/_helpers.tpl +# nameOverride: +# fullnameOverride: + +# -- Override the deployment namespace; defaults to .Release.Namespace +namespaceOverride: "freeleaps-controls-system" +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +controller: + name: controller + enableAnnotationValidations: true + image: + ## Keep false as default for now! 
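+    ## (Setting chroot: true switches the controller to the -chroot image
+    ## variant pinned by digestChroot below; kept false here in line with the
+    ## upstream default.)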
+ chroot: false + # registry: registry.k8s.io + image: ingress-nginx/controller + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: "v1.12.0" + digest: sha256:e6b8de175acda6ca913891f0f727bca4527e797d52688cbe9fec9040d6f6b6fa + digestChroot: sha256:87c88e1c38a6c8d4483c8f70b69e2cca49853bb3ec3124b9b1be648edf139af3 + pullPolicy: IfNotPresent + runAsNonRoot: true + # -- This value must not be changed using the official image. + # uid=101(www-data) gid=82(www-data) groups=82(www-data) + runAsUser: 101 + # -- This value must not be changed using the official image. + # uid=101(www-data) gid=82(www-data) groups=82(www-data) + runAsGroup: 82 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + readOnlyRootFilesystem: false + # -- Configures the controller container name + containerName: controller + # -- Configures the ports that the nginx-controller listens on + containerPort: + http: 80 + https: 443 + # -- Global configuration passed to the ConfigMap consumed by the controller. Values may contain Helm templates. + # Ref.: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/ + config: {} + # -- Annotations to be added to the controller config configuration configmap. + configAnnotations: {} + # -- Will add custom headers before sending traffic to backends according to https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/custom-headers + proxySetHeaders: {} + # -- Will add custom headers before sending response traffic to the client according to: https://kubernetes.github.io/ingress-nginx/user-guide/nginx-configuration/configmap/#add-headers + addHeaders: {} + # -- Optionally customize the pod dnsConfig. + dnsConfig: {} + # -- Optionally customize the pod hostAliases. + hostAliases: [] + # - ip: 127.0.0.1 + # hostnames: + # - foo.local + # - bar.local + # - ip: 10.1.2.3 + # hostnames: + # - foo.remote + # - bar.remote + # -- Optionally customize the pod hostname. + hostname: {} + # -- Optionally change this to ClusterFirstWithHostNet in case you have 'hostNetwork: true'. + # By default, while using host network, name resolution uses the host's DNS. If you wish nginx-controller + # to keep resolving names inside the k8s network, use ClusterFirstWithHostNet. + dnsPolicy: ClusterFirst + # -- Bare-metal considerations via the host network https://kubernetes.github.io/ingress-nginx/deploy/baremetal/#via-the-host-network + # Ingress status was blank because there is no Service exposing the Ingress-Nginx Controller in a configuration using the host network, the default --publish-service flag used in standard cloud setups does not apply + reportNodeInternalIp: false + # -- Process Ingress objects without ingressClass annotation/ingressClassName field + # Overrides value for --watch-ingress-without-class flag of the controller binary + # Defaults to false + watchIngressWithoutClass: false + # -- Process IngressClass per name (additionally as per spec.controller). 
+ ingressClassByName: false + # -- This configuration enables Topology Aware Routing feature, used together with service annotation service.kubernetes.io/topology-mode="auto" + # Defaults to false + enableTopologyAwareRouting: false + # -- This configuration disable Nginx Controller Leader Election + disableLeaderElection: false + # -- Duration a leader election is valid before it's getting re-elected, e.g. `15s`, `10m` or `1h`. (Default: 30s) + electionTTL: "" + # -- This configuration defines if Ingress Controller should allow users to set + # their own *-snippet annotations, otherwise this is forbidden / dropped + # when users add those annotations. + # Global snippets in ConfigMap are still respected + allowSnippetAnnotations: false + # -- Required for use with CNI based kubernetes installations (such as ones set up by kubeadm), + # since CNI and hostport don't mix yet. Can be deprecated once https://github.com/kubernetes/kubernetes/issues/23920 + # is merged + hostNetwork: false + ## Use host ports 80 and 443 + ## Disabled by default + hostPort: + # -- Enable 'hostPort' or not + enabled: true + ports: + # -- 'hostPort' http port + http: 80 + # -- 'hostPort' https port + https: 443 + # NetworkPolicy for controller component. + networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + # -- Election ID to use for status update, by default it uses the controller name combined with a suffix of 'leader' + electionID: "" + # -- This section refers to the creation of the IngressClass resource. + # IngressClasses are immutable and cannot be changed after creation. + # We do not support namespaced IngressClasses, yet, so a ClusterRole and a ClusterRoleBinding is required. + ingressClassResource: + # -- Name of the IngressClass + name: nginx + # -- Create the IngressClass or not + enabled: true + # -- If true, Ingresses without `ingressClassName` get assigned to this IngressClass on creation. + # Ingress creation gets rejected if there are multiple default IngressClasses. + # Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#default-ingress-class + default: false + # -- Annotations to be added to the IngressClass resource. + annotations: {} + # -- Controller of the IngressClass. An Ingress Controller looks for IngressClasses it should reconcile by this value. + # This value is also being set as the `--controller-class` argument of this Ingress Controller. + # Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class + controllerValue: k8s.io/ingress-nginx + # -- Aliases of this IngressClass. Creates copies with identical settings but the respective alias as name. + # Useful for development environments with only one Ingress Controller but production-like Ingress resources. + # `default` gets enabled on the original IngressClass only. + aliases: [] + # aliases: + # - nginx-alias-1 + # - nginx-alias-2 + # -- A link to a custom resource containing additional configuration for the controller. + # This is optional if the controller consuming this IngressClass does not require additional parameters. + # Ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-class + parameters: {} + # parameters: + # apiGroup: k8s.example.com + # kind: IngressParameters + # name: external-lb + # -- For backwards compatibility with ingress.class annotation, use ingressClass. 
+  # The algorithm is as follows: first, ingressClassName is considered; if not present, the controller looks for the ingress.class annotation.
+  ingressClass: nginx
+  # -- Labels to add to the pod container metadata
+  podLabels: {}
+  #  key: value
+
+  # -- Security context for controller pods
+  podSecurityContext: {}
+  # -- sysctls for controller pods
+  ## Ref: https://kubernetes.io/docs/tasks/administer-cluster/sysctl-cluster/
+  sysctls: {}
+  # sysctls:
+  #   "net.core.somaxconn": "8192"
+  # -- Security context for controller containers
+  containerSecurityContext: {}
+  # -- Allows customization of the source of the IP address or FQDN to report
+  # in the ingress status field. By default, it reads the information provided
+  # by the service. If disabled, the status field reports the IP address of the
+  # node or nodes where an ingress controller pod is running.
+  publishService:
+    # -- Enable 'publishService' or not
+    enabled: true
+    # -- Allows overriding of the publish service to bind to
+    # Must be <namespace>/<service_name>
+    pathOverride: ""
+  # Limit the scope of the controller to a specific namespace
+  scope:
+    # -- Enable 'scope' or not
+    enabled: false
+    # -- Namespace to limit the controller to; defaults to $(POD_NAMESPACE)
+    namespace: ""
+    # -- When scope.enabled == false, instead of watching all namespaces, watch only the namespaces whose
+    # labels match namespaceSelector. Format: foo=bar. Defaults to empty, meaning all namespaces are watched.
+    namespaceSelector: ""
+  # -- Allows customization of the configmap / nginx-configmap namespace; defaults to $(POD_NAMESPACE)
+  configMapNamespace: ""
+  tcp:
+    # -- Allows customization of the tcp-services-configmap; defaults to $(POD_NAMESPACE)
+    configMapNamespace: ""
+    # -- Annotations to be added to the tcp config configmap
+    annotations: {}
+  udp:
+    # -- Allows customization of the udp-services-configmap; defaults to $(POD_NAMESPACE)
+    configMapNamespace: ""
+    # -- Annotations to be added to the udp config configmap
+    annotations: {}
+  # -- Maxmind license key to download GeoLite2 Databases.
+  ## https://blog.maxmind.com/2019/12/significant-changes-to-accessing-and-using-geolite2-databases/
+  maxmindLicenseKey: ""
+  # -- Additional command line arguments to pass to Ingress-Nginx Controller
+  # E.g. to specify the default SSL certificate you can use
+  extraArgs: {}
+  ## extraArgs:
+  ##   default-ssl-certificate: "<namespace>/<secret_name>"
+  ##   time-buckets: "0.005,0.01,0.025,0.05,0.1,0.25,0.5,1,2.5,5,10"
+  ##   length-buckets: "10,20,30,40,50,60,70,80,90,100"
+  ##   size-buckets: "10,100,1000,10000,100000,1e+06,1e+07"
+
+  # -- Additional environment variables to set
+  extraEnvs: []
+  # extraEnvs:
+  #   - name: FOO
+  #     valueFrom:
+  #       secretKeyRef:
+  #         key: FOO
+  #         name: secret-resource
+
+  # -- Use a `DaemonSet` or `Deployment`
+  kind: Deployment
+  # -- Annotations to be added to the controller Deployment or DaemonSet
+  ##
+  annotations: {}
+  #  keel.sh/pollSchedule: "@every 60m"
+
+  # -- Labels to be added to the controller Deployment or DaemonSet and other resources that do not have option to specify labels
+  ##
+  labels: {}
+  #  keel.sh/policy: patch
+  #  keel.sh/trigger: poll
+
+  # -- The update strategy to apply to the Deployment or DaemonSet
+  ##
+  updateStrategy: {}
+  #  rollingUpdate:
+  #    maxUnavailable: 1
+  #  type: RollingUpdate
+
+  # -- Specifies the number of seconds you want to wait for the controller deployment to progress before the system reports back that it has failed.
+ # Ref.: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#progress-deadline-seconds + progressDeadlineSeconds: 0 + # -- `minReadySeconds` to avoid killing pods before we are ready + ## + minReadySeconds: 0 + # -- Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + # -- Affinity and anti-affinity rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - '{{ include "ingress-nginx.name" . }}' + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ .Release.Name }}' + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - '{{ include "ingress-nginx.name" . }}' + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ .Release.Name }}' + # - key: app.kubernetes.io/component + # operator: In + # values: + # - controller + # topologyKey: kubernetes.io/hostname + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . }}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: controller + # matchLabelKeys: + # - pod-template-hash + # topologyKey: topology.kubernetes.io/zone + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . 
}}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: controller + # matchLabelKeys: + # - pod-template-hash + # topologyKey: kubernetes.io/hostname + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + + # -- `terminationGracePeriodSeconds` to avoid killing pods before we are ready + ## wait up to five minutes for the drain of connections + ## + terminationGracePeriodSeconds: 300 + # -- Node labels for controller pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: + kubernetes.io/os: linux + freeleaps.cluster/ingress-node: "true" + ## Liveness and readiness probe values + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes + ## + ## startupProbe: + ## httpGet: + ## # should match container.healthCheckPath + ## path: "/healthz" + ## port: 10254 + ## scheme: HTTP + ## initialDelaySeconds: 5 + ## periodSeconds: 5 + ## timeoutSeconds: 2 + ## successThreshold: 1 + ## failureThreshold: 5 + livenessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + httpGet: + # should match container.healthCheckPath + path: "/healthz" + port: 10254 + scheme: HTTP + initialDelaySeconds: 10 + periodSeconds: 10 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 3 + # -- Path of the health check endpoint. All requests received on the port defined by + # the healthz-port parameter are forwarded internally to this path. + healthCheckPath: "/healthz" + # -- Address to bind the health check endpoint. + # It is better to set this option to the internal node address + # if the Ingress-Nginx Controller is running in the `hostNetwork: true` mode. + healthCheckHost: "" + # -- Annotations to be added to controller pods + ## + podAnnotations: {} + replicaCount: 3 + # -- Minimum available pods set in PodDisruptionBudget. + # Define either 'minAvailable' or 'maxUnavailable', never both. + minAvailable: 1 + # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored. + # maxUnavailable: 1 + # -- Eviction policy for unhealthy pods guarded by PodDisruptionBudget. + # Ref: https://kubernetes.io/blog/2023/01/06/unhealthy-pod-eviction-policy-for-pdbs/ + unhealthyPodEvictionPolicy: "" + ## Define requests resources to avoid probe issues due to CPU utilization in busy nodes + ## ref: https://github.com/kubernetes/ingress-nginx/issues/4735#issuecomment-551204903 + ## Ideally, there should be no limits. 
+ ## https://engineering.indeedblog.com/blog/2019/12/cpu-throttling-regression-fix/ + resources: + ## limits: + ## cpu: 100m + ## memory: 90Mi + requests: + cpu: 100m + memory: 90Mi + # Mutually exclusive with keda autoscaling + autoscaling: + enabled: false + annotations: {} + minReplicas: 1 + maxReplicas: 11 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + autoscalingTemplate: [] + # Custom or additional autoscaling metrics + # ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/#support-for-custom-metrics + # - type: Pods + # pods: + # metric: + # name: nginx_ingress_controller_nginx_process_requests_total + # target: + # type: AverageValue + # averageValue: 10000m + + # Mutually exclusive with hpa autoscaling + keda: + apiVersion: "keda.sh/v1alpha1" + ## apiVersion changes with keda 1.x vs 2.x + ## 2.x = keda.sh/v1alpha1 + ## 1.x = keda.k8s.io/v1alpha1 + enabled: false + minReplicas: 1 + maxReplicas: 11 + pollingInterval: 30 + cooldownPeriod: 300 + # fallback: + # failureThreshold: 3 + # replicas: 11 + restoreToOriginalReplicaCount: false + scaledObject: + annotations: {} + # Custom annotations for ScaledObject resource + # annotations: + # key: value + triggers: [] + # - type: prometheus + # metadata: + # serverAddress: http://:9090 + # metricName: http_requests_total + # threshold: '100' + # query: sum(rate(http_requests_total{deployment="my-deployment"}[2m])) + + behavior: {} + # scaleDown: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 1 + # periodSeconds: 180 + # scaleUp: + # stabilizationWindowSeconds: 300 + # policies: + # - type: Pods + # value: 2 + # periodSeconds: 60 + # -- Enable mimalloc as a drop-in replacement for malloc. + ## ref: https://github.com/microsoft/mimalloc + ## + enableMimalloc: true + ## Override NGINX template + customTemplate: + configMapName: "" + configMapKey: "" + service: + # -- Enable controller services or not. This does not influence the creation of either the admission webhook or the metrics service. + enabled: true + external: + # -- Enable the external controller service or not. Useful for internal-only deployments. + enabled: true + # -- Annotations to be added to the external controller service. See `controller.service.internal.annotations` for annotations to be added to the internal controller service. + annotations: {} + # -- Labels to be added to both controller services. + labels: {} + # -- Type of the external controller service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: LoadBalancer + # -- Pre-defined cluster internal IP address of the external controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIP: "" + # -- Pre-defined cluster internal IP addresses of the external controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIPs: [] + # -- List of node IP addresses at which the external controller service is available. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + externalIPs: [] + # -- Deprecated: Pre-defined IP address of the external controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + # -- Restrict access to the external controller service. Values must be CIDRs. Allows any source address by default. + loadBalancerSourceRanges: [] + # -- Load balancer class of the external controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + # -- Enable node port allocation for the external controller service or not. Applies to type `LoadBalancer` only. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + # allocateLoadBalancerNodePorts: true + + # -- External traffic policy of the external controller service. Set to "Local" to preserve source IP on providers supporting it. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + externalTrafficPolicy: "" + # -- Session affinity of the external controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". + # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + # -- Specifies the health check node port (numeric port number) for the external controller service. + # If not specified, the service controller allocates a port from your cluster's node port range. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Traffic distribution policy of the external controller service. Set to "PreferClose" to route traffic to endpoints that are topologically closer to the client. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution + trafficDistribution: "" + + # -- Represents the dual-stack capabilities of the external controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. + # Fields `ipFamilies` and `clusterIP` depend on the value of this field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilyPolicy: SingleStack + # -- List of IP families (e.g. IPv4, IPv6) assigned to the external controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilies: + - IPv4 + # -- Enable the HTTP listener on both controller services or not. + enableHttp: true + # -- Enable the HTTPS listener on both controller services or not. + enableHttps: true + ports: + # -- Port the external HTTP listener is published with. + http: 80 + # -- Port the external HTTPS listener is published with. + https: 443 + targetPorts: + # -- Port of the ingress controller the external HTTP listener is mapped to. 
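+      # An illustrative variant, not a chart default: when TLS is terminated
+      # at the cloud load balancer, both listeners are commonly mapped to the
+      # plain-HTTP port:
+      # targetPorts:
+      #   http: http
+      #   https: http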
+ http: http + # -- Port of the ingress controller the external HTTPS listener is mapped to. + https: https + # -- Declare the app protocol of the external HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol + appProtocol: true + nodePorts: + # -- Node port allocated for the external HTTP listener. If left empty, the service controller allocates one from the configured node port range. + http: "" + # -- Node port allocated for the external HTTPS listener. If left empty, the service controller allocates one from the configured node port range. + https: "" + # -- Node port mapping for external TCP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # tcp: + # 8080: 30080 + tcp: {} + # -- Node port mapping for external UDP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # udp: + # 53: 30053 + udp: {} + internal: + # -- Enable the internal controller service or not. Remember to configure `controller.service.internal.annotations` when enabling this. + enabled: false + # -- Annotations to be added to the internal controller service. Mandatory for the internal controller service to be created. Varies with the cloud service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + annotations: {} + # -- Type of the internal controller service. + # Defaults to the value of `controller.service.type`. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#publishing-services-service-types + type: "" + # -- Pre-defined cluster internal IP address of the internal controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIP: "" + # -- Pre-defined cluster internal IP addresses of the internal controller service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIPs: [] + # -- List of node IP addresses at which the internal controller service is available. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + externalIPs: [] + # -- Deprecated: Pre-defined IP address of the internal controller service. Used by cloud providers to connect the resulting load balancer service to a pre-existing static IP. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer + loadBalancerIP: "" + # -- Restrict access to the internal controller service. Values must be CIDRs. Allows any source address by default. + loadBalancerSourceRanges: [] + # -- Load balancer class of the internal controller service. Used by cloud providers to select a load balancer implementation other than the cloud provider default. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-class + loadBalancerClass: "" + # -- Enable node port allocation for the internal controller service or not. Applies to type `LoadBalancer` only. 
+ # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#load-balancer-nodeport-allocation + # allocateLoadBalancerNodePorts: true + + # -- External traffic policy of the internal controller service. Set to "Local" to preserve source IP on providers supporting it. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + externalTrafficPolicy: "" + # -- Session affinity of the internal controller service. Must be either "None" or "ClientIP" if set. Defaults to "None". + # Ref: https://kubernetes.io/docs/reference/networking/virtual-ips/#session-affinity + sessionAffinity: "" + # -- Specifies the health check node port (numeric port number) for the internal controller service. + # If not specified, the service controller allocates a port from your cluster's node port range. + # Ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + # healthCheckNodePort: 0 + + # -- Traffic distribution policy of the internal controller service. Set to "PreferClose" to route traffic to endpoints that are topologically closer to the client. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#traffic-distribution + trafficDistribution: "" + + # -- Represents the dual-stack capabilities of the internal controller service. Possible values are SingleStack, PreferDualStack or RequireDualStack. + # Fields `ipFamilies` and `clusterIP` depend on the value of this field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilyPolicy: SingleStack + # -- List of IP families (e.g. IPv4, IPv6) assigned to the internal controller service. This field is usually assigned automatically based on cluster configuration and the `ipFamilyPolicy` field. + # Ref: https://kubernetes.io/docs/concepts/services-networking/dual-stack/#services + ipFamilies: + - IPv4 + ports: {} + # -- Port the internal HTTP listener is published with. + # Defaults to the value of `controller.service.ports.http`. + # http: 80 + # -- Port the internal HTTPS listener is published with. + # Defaults to the value of `controller.service.ports.https`. + # https: 443 + + targetPorts: {} + # -- Port of the ingress controller the internal HTTP listener is mapped to. + # Defaults to the value of `controller.service.targetPorts.http`. + # http: http + # -- Port of the ingress controller the internal HTTPS listener is mapped to. + # Defaults to the value of `controller.service.targetPorts.https`. + # https: https + + # -- Declare the app protocol of the internal HTTP and HTTPS listeners or not. Supersedes provider-specific annotations for declaring the backend protocol. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#application-protocol + appProtocol: true + nodePorts: + # -- Node port allocated for the internal HTTP listener. If left empty, the service controller allocates one from the configured node port range. + http: "" + # -- Node port allocated for the internal HTTPS listener. If left empty, the service controller allocates one from the configured node port range. + https: "" + # -- Node port mapping for internal TCP listeners. If left empty, the service controller allocates them from the configured node port range. + # Example: + # tcp: + # 8080: 30080 + tcp: {} + # -- Node port mapping for internal UDP listeners. If left empty, the service controller allocates them from the configured node port range. 
+ # Example: + # udp: + # 53: 30053 + udp: {} + # shareProcessNamespace enables process namespace sharing within the pod. + # This can be used for example to signal log rotation using `kill -USR1` from a sidecar. + shareProcessNamespace: false + # -- Additional containers to be added to the controller pod. + # See https://github.com/lemonldap-ng-controller/lemonldap-ng-controller as example. + extraContainers: [] + # - name: my-sidecar + # image: nginx:latest + # - name: lemonldap-ng-controller + # image: lemonldapng/lemonldap-ng-controller:0.2.0 + # args: + # - /lemonldap-ng-controller + # - --alsologtostderr + # - --configmap=$(POD_NAMESPACE)/lemonldap-ng-configuration + # env: + # - name: POD_NAME + # valueFrom: + # fieldRef: + # fieldPath: metadata.name + # - name: POD_NAMESPACE + # valueFrom: + # fieldRef: + # fieldPath: metadata.namespace + # volumeMounts: + # - name: copy-portal-skins + # mountPath: /srv/var/lib/lemonldap-ng/portal/skins + + # -- Additional volumeMounts to the controller main container. + extraVolumeMounts: [] + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + # -- Additional volumes to the controller pod. + extraVolumes: [] + # - name: copy-portal-skins + # emptyDir: {} + + # -- Containers, which are run before the app containers are started. + extraInitContainers: [] + # - name: init-myservice + # image: busybox + # command: ['sh', '-c', 'until nslookup myservice; do echo waiting for myservice; sleep 2; done;'] + + # -- Modules, which are mounted into the core nginx image. + extraModules: [] + # - name: mytestmodule + # image: + # # registry: registry.k8s.io + # image: ingress-nginx/mytestmodule + # ## for backwards compatibility consider setting the full image url via the repository value below + # ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + # ## repository: + # tag: "v1.0.0" + # digest: "" + # distroless: false + # containerSecurityContext: + # runAsNonRoot: true + # runAsUser: + # runAsGroup: + # allowPrivilegeEscalation: false + # seccompProfile: + # type: RuntimeDefault + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # resources: {} + # + # The image must contain a `/usr/local/bin/init_module.sh` executable, which + # will be executed as initContainers, to move its config files within the + # mounted volume. + + admissionWebhooks: + name: admission + annotations: {} + # ignore-check.kube-linter.io/no-read-only-rootfs: "This deployment needs write access to root filesystem". + + ## Additional annotations to the admission webhooks. + ## These annotations will be added to the ValidatingWebhookConfiguration and + ## the Jobs Spec of the admission webhooks. 
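+    ## For example (illustrative only), to run the cert generation Jobs as
+    ## Argo CD PreSync hooks:
+    ## annotations:
+    ##   argocd.argoproj.io/hook: PreSync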
+ enabled: true + # -- Additional environment variables to set + extraEnvs: [] + # extraEnvs: + # - name: FOO + # valueFrom: + # secretKeyRef: + # key: FOO + # name: secret-resource + # -- Admission Webhook failure policy to use + failurePolicy: Fail + # timeoutSeconds: 10 + port: 8443 + certificate: "/usr/local/certificates/cert" + key: "/usr/local/certificates/key" + namespaceSelector: {} + objectSelector: {} + # -- Labels to be added to admission webhooks + labels: {} + service: + annotations: {} + # clusterIP: "" + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 443 + type: ClusterIP + createSecretJob: + name: create + # -- Security context for secret creation containers + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + patchWebhookJob: + name: patch + # -- Security context for webhook patch containers + securityContext: + runAsNonRoot: true + runAsUser: 65532 + runAsGroup: 65532 + allowPrivilegeEscalation: false + seccompProfile: + type: RuntimeDefault + capabilities: + drop: + - ALL + readOnlyRootFilesystem: true + resources: {} + patch: + enabled: true + image: + # registry: registry.k8s.io + image: ingress-nginx/kube-webhook-certgen + ## for backwards compatibility consider setting the full image url via the repository value below + ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail + ## repository: + tag: v1.5.0 + digest: sha256:aaafd456bda110628b2d4ca6296f38731a3aaf0bf7581efae824a41c770a8fc4 + pullPolicy: IfNotPresent + # -- Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + podAnnotations: {} + # NetworkPolicy for webhook patch + networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + nodeSelector: + kubernetes.io/os: linux + tolerations: [] + # -- Labels to be added to patch job resources + labels: {} + # -- Security context for secret creation & webhook patch pods + securityContext: {} + # -- Admission webhook patch job RBAC + rbac: + # -- Create RBAC or not + create: true + # -- Admission webhook patch job service account + serviceAccount: + # -- Create a service account or not + create: true + # -- Custom service account name + name: "" + # -- Auto-mount service account token or not + automountServiceAccountToken: true + # Use certmanager to generate webhook certs + certManager: + enabled: false + # self-signed root certificate + rootCert: + # default to be 5y + duration: "" + admissionCert: + # default to be 1y + duration: "" + # issuerRef: + # name: "issuer" + # kind: "ClusterIssuer" + metrics: + port: 10254 + portName: metrics + # if this port is changed, change healthz-port: in extraArgs: accordingly + enabled: false + service: + # -- Enable the metrics service or not. 
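+      ## Hedged note: with `controller.metrics.enabled` left false above, no
+      ## metrics Service is rendered. To scrape this controller from the
+      ## kube-prometheus-stack deployed in freeleaps-monitoring-system, one
+      ## would typically set, for example:
+      ## metrics:
+      ##   enabled: true
+      ##   serviceMonitor:
+      ##     enabled: true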
+ enabled: true + annotations: {} + # prometheus.io/scrape: "true" + # prometheus.io/port: "10254" + # -- Labels to be added to the metrics service resource + labels: {} + # clusterIP: "" + + # -- List of IP addresses at which the stats-exporter service is available + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 10254 + type: ClusterIP + # externalTrafficPolicy: "" + # nodePort: "" + serviceMonitor: + enabled: false + additionalLabels: {} + # -- Annotations to be added to the ServiceMonitor. + annotations: {} + ## The label to use to retrieve the job name from. + ## jobLabel: "app.kubernetes.io/name" + namespace: "" + namespaceSelector: {} + ## Default: scrape .Release.Namespace or namespaceOverride only + ## To scrape all, use the following: + ## namespaceSelector: + ## any: true + scrapeInterval: 30s + # honorLabels: true + targetLabels: [] + relabelings: [] + metricRelabelings: [] + # -- Per-scrape limit on number of labels that will be accepted for a sample. + labelLimit: 0 + # -- Per-scrape limit on length of labels name that will be accepted for a sample. + labelNameLengthLimit: 0 + # -- Per-scrape limit on length of labels value that will be accepted for a sample. + labelValueLengthLimit: 0 + # -- Defines a per-scrape limit on the number of scraped samples that will be accepted. + sampleLimit: 0 + # -- Defines a limit on the number of scraped targets that will be accepted. + targetLimit: 0 + prometheusRule: + enabled: false + additionalLabels: {} + # -- Annotations to be added to the PrometheusRule. + annotations: {} + # namespace: "" + rules: [] + # # These are just examples rules, please adapt them to your needs + # - alert: NGINXConfigFailed + # expr: count(nginx_ingress_controller_config_last_reload_successful == 0) > 0 + # for: 1s + # labels: + # severity: critical + # annotations: + # description: bad ingress config - nginx config test failed + # summary: uninstall the latest ingress changes to allow config reloads to resume + # # By default a fake self-signed certificate is generated as default and + # # it is fine if it expires. If `--default-ssl-certificate` flag is used + # # and a valid certificate passed please do not filter for `host` label! + # # (i.e. 
delete `{host!="_"}` so also the default SSL certificate is
+      # #   checked for expiration)
+      # - alert: NGINXCertificateExpiry
+      #   expr: (avg(nginx_ingress_controller_ssl_expire_time_seconds{host!="_"}) by (host) - time()) < 604800
+      #   for: 1s
+      #   labels:
+      #     severity: critical
+      #   annotations:
+      #     description: ssl certificate(s) will expire in less than a week
+      #     summary: renew expiring certificates to avoid downtime
+      # - alert: NGINXTooMany500s
+      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"5.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+      #   for: 1m
+      #   labels:
+      #     severity: warning
+      #   annotations:
+      #     description: Too many 5XXs
+      #     summary: More than 5% of all requests returned 5XX, this requires your attention
+      # - alert: NGINXTooMany400s
+      #   expr: 100 * ( sum( nginx_ingress_controller_requests{status=~"4.+"} ) / sum(nginx_ingress_controller_requests) ) > 5
+      #   for: 1m
+      #   labels:
+      #     severity: warning
+      #   annotations:
+      #     description: Too many 4XXs
+      #     summary: More than 5% of all requests returned 4XX, this requires your attention
+  # -- Improve connection draining when ingress controller pod is deleted using a lifecycle hook:
+  # With this new hook, we increased the default terminationGracePeriodSeconds from 30 seconds
+  # to 300, allowing the draining of connections up to five minutes.
+  # If the active connections end before that, the pod will terminate gracefully at that time.
+  # To take full advantage of this feature, the ConfigMap option
+  # worker-shutdown-timeout now defaults to 240s instead of 10s.
+  ##
+  lifecycle:
+    preStop:
+      exec:
+        command:
+          - /wait-shutdown
+  priorityClassName: ""
+# -- Rollback limit
+##
+revisionHistoryLimit: 10
+## Default 404 backend
+##
+defaultBackend:
+  ##
+  enabled: false
+  name: defaultbackend
+  image:
+    # registry: registry.k8s.io
+    image: defaultbackend-amd64
+    ## for backwards compatibility consider setting the full image url via the repository value below
+    ## use *either* current default registry/image or repository format or installing chart by providing the values.yaml will fail
+    ## repository:
+    tag: "1.5"
+    pullPolicy: IfNotPresent
+    runAsNonRoot: true
+    # nobody user -> uid 65534
+    runAsUser: 65534
+    runAsGroup: 65534
+    allowPrivilegeEscalation: false
+    seccompProfile:
+      type: RuntimeDefault
+    readOnlyRootFilesystem: true
+  extraArgs: {}
+  serviceAccount:
+    create: true
+    name: ""
+    automountServiceAccountToken: true
+  # -- Additional environment variables to set for defaultBackend pods
+  extraEnvs: []
+  port: 8080
+  ## Readiness and liveness probes for default backend
+  ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/
+  ##
+  livenessProbe:
+    failureThreshold: 3
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    successThreshold: 1
+    timeoutSeconds: 5
+  readinessProbe:
+    failureThreshold: 6
+    initialDelaySeconds: 0
+    periodSeconds: 5
+    successThreshold: 1
+    timeoutSeconds: 5
+  # -- The update strategy to apply to the Deployment or DaemonSet
+  ##
+  updateStrategy: {}
+  #  rollingUpdate:
+  #    maxUnavailable: 1
+  #  type: RollingUpdate
+
+  # -- `minReadySeconds` to avoid killing pods before we are ready
+  ##
+  minReadySeconds: 0
+  # -- Node tolerations for server scheduling to nodes with taints
+  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/
+  ##
+  tolerations: []
+  #  - key: "key"
+  #    operator: "Equal|Exists"
+  #    value: "value"
+  #    effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"
+
+  # -- Affinity and anti-affinity 
rules for server scheduling to nodes + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + affinity: {} + # # An example of preferred pod anti-affinity, weight is in the range 1-100 + # podAntiAffinity: + # preferredDuringSchedulingIgnoredDuringExecution: + # - weight: 100 + # podAffinityTerm: + # labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - '{{ include "ingress-nginx.name" . }}' + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ .Release.Name }}' + # - key: app.kubernetes.io/component + # operator: In + # values: + # - default-backend + # topologyKey: kubernetes.io/hostname + + # # An example of required pod anti-affinity + # podAntiAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # - labelSelector: + # matchExpressions: + # - key: app.kubernetes.io/name + # operator: In + # values: + # - '{{ include "ingress-nginx.name" . }}' + # - key: app.kubernetes.io/instance + # operator: In + # values: + # - '{{ .Release.Name }}' + # - key: app.kubernetes.io/component + # operator: In + # values: + # - default-backend + # topologyKey: kubernetes.io/hostname + + # -- Topology spread constraints rely on node labels to identify the topology domain(s) that each Node is in. + # Ref.: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + topologySpreadConstraints: [] + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . }}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: default-backend + # matchLabelKeys: + # - pod-template-hash + # topologyKey: topology.kubernetes.io/zone + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + # - labelSelector: + # matchLabels: + # app.kubernetes.io/name: '{{ include "ingress-nginx.name" . }}' + # app.kubernetes.io/instance: '{{ .Release.Name }}' + # app.kubernetes.io/component: default-backend + # matchLabelKeys: + # - pod-template-hash + # topologyKey: kubernetes.io/hostname + # maxSkew: 1 + # whenUnsatisfiable: ScheduleAnyway + # -- Security context for default backend pods + podSecurityContext: {} + # -- Security context for default backend containers + containerSecurityContext: {} + # -- Labels to add to the pod container metadata + podLabels: {} + # key: value + + # -- Node labels for default backend pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: + kubernetes.io/os: linux + # -- Annotations to be added to default backend pods + ## + podAnnotations: {} + replicaCount: 1 + # -- Minimum available pods set in PodDisruptionBudget. + # Define either 'minAvailable' or 'maxUnavailable', never both. + minAvailable: 1 + # -- Maximum unavailable pods set in PodDisruptionBudget. If set, 'minAvailable' is ignored. + # maxUnavailable: 1 + # -- Eviction policy for unhealthy pods guarded by PodDisruptionBudget. + # Ref: https://kubernetes.io/blog/2023/01/06/unhealthy-pod-eviction-policy-for-pdbs/ + unhealthyPodEvictionPolicy: "" + resources: {} + # limits: + # cpu: 10m + # memory: 20Mi + # requests: + # cpu: 10m + # memory: 20Mi + + extraVolumeMounts: [] + ## Additional volumeMounts to the default backend container. + # - name: copy-portal-skins + # mountPath: /var/lib/lemonldap-ng/portal/skins + + extraVolumes: [] + ## Additional volumes to the default backend pod. 
+ # - name: copy-portal-skins + # emptyDir: {} + + extraConfigMaps: [] + ## Additional configmaps to the default backend pod. + # - name: my-extra-configmap-1 + # labels: + # type: config-1 + # data: + # extra_file_1.html: | + # + # - name: my-extra-configmap-2 + # labels: + # type: config-2 + # data: + # extra_file_2.html: | + # + + autoscaling: + annotations: {} + enabled: false + minReplicas: 1 + maxReplicas: 2 + targetCPUUtilizationPercentage: 50 + targetMemoryUtilizationPercentage: 50 + # NetworkPolicy for default backend component. + networkPolicy: + # -- Enable 'networkPolicy' or not + enabled: false + service: + annotations: {} + # clusterIP: "" + # -- Pre-defined cluster internal IP addresses of the default backend service. Take care of collisions with existing services. + # This value is immutable. Set once, it can not be changed without deleting and re-creating the service. + # Ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + clusterIPs: [] + + # -- List of IP addresses at which the default backend service is available + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + # loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + type: ClusterIP + priorityClassName: "" + # -- Labels to be added to the default backend resources + labels: {} +## Enable RBAC as per https://github.com/kubernetes/ingress-nginx/blob/main/docs/deploy/rbac.md and https://github.com/kubernetes/ingress-nginx/issues/266 +rbac: + create: true + scope: false +serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + # -- Annotations for the controller service account + annotations: {} +# -- Optional array of imagePullSecrets containing private registry credentials +## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +imagePullSecrets: [] +# - name: secretName + +# -- TCP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +tcp: {} +# "8080": "default/example-tcp-svc:9000" + +# -- UDP service key-value pairs +## Ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/exposing-tcp-udp-services.md +## +udp: {} +# "53": "kube-system/kube-dns:53" + +# -- Prefix for TCP and UDP ports names in ingress controller service +## Some cloud providers, like Yandex Cloud may have a requirements for a port name regex to support cloud load balancer integration +portNamePrefix: "" +# -- (string) A base64-encoded Diffie-Hellman parameter. 
+# This can be generated with: `openssl dhparam 4096 2> /dev/null | base64` +## Ref: https://github.com/kubernetes/ingress-nginx/tree/main/docs/examples/customization/ssl-dh-param +dhParam: "" \ No newline at end of file diff --git a/cluster/manifests/freeleaps-controls-system/namespace.yaml b/cluster/manifests/freeleaps-controls-system/namespace.yaml new file mode 100644 index 00000000..893b761a --- /dev/null +++ b/cluster/manifests/freeleaps-controls-system/namespace.yaml @@ -0,0 +1,6 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: freeleaps-controls-system + labels: + name: freeleaps-controls-system \ No newline at end of file diff --git a/cluster/manifests/freeleaps-monitoring-system/kube-prometheus-stack/.gitkeep b/cluster/manifests/freeleaps-monitoring-system/kube-prometheus-stack/.gitkeep deleted file mode 100644 index e69de29b..00000000 diff --git a/cluster/manifests/freeleaps-monitoring-system/kube-prometheus-stack/values.yaml b/cluster/manifests/freeleaps-monitoring-system/kube-prometheus-stack/values.yaml new file mode 100644 index 00000000..d6558627 --- /dev/null +++ b/cluster/manifests/freeleaps-monitoring-system/kube-prometheus-stack/values.yaml @@ -0,0 +1,5022 @@ +# Default values for kube-prometheus-stack. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +## Provide a name in place of kube-prometheus-stack for `app:` labels +## +nameOverride: "" + +## Override the deployment namespace +## +namespaceOverride: "freeleaps-monitoring-system" + +## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.26.6 +## +kubeTargetVersionOverride: "" + +## Allow kubeVersion to be overridden while creating the ingress +## +kubeVersionOverride: "" + +## Provide a name to substitute for the full names of resources +## +fullnameOverride: "" + +## Labels to apply to all resources +## +commonLabels: {} +# scmhash: abc123 +# myLabel: aakkmd + +## Install Prometheus Operator CRDs +## +crds: + enabled: true + +## custom Rules to override "for" and "severity" in defaultRules +## +customRules: {} + # AlertmanagerFailedReload: + # for: 3m + # AlertmanagerMembersInconsistent: + # for: 5m + # severity: "warning" + +## Create default rules for monitoring the cluster +## +defaultRules: + create: true + rules: + alertmanager: true + etcd: true + configReloaders: true + general: true + k8sContainerCpuUsageSecondsTotal: true + k8sContainerMemoryCache: true + k8sContainerMemoryRss: true + k8sContainerMemorySwap: true + k8sContainerResource: true + k8sContainerMemoryWorkingSetBytes: true + k8sPodOwner: true + kubeApiserverAvailability: true + kubeApiserverBurnrate: true + kubeApiserverHistogram: true + kubeApiserverSlos: true + kubeControllerManager: true + kubelet: true + kubeProxy: true + kubePrometheusGeneral: true + kubePrometheusNodeRecording: true + kubernetesApps: true + kubernetesResources: true + kubernetesStorage: true + kubernetesSystem: true + kubeSchedulerAlerting: true + kubeSchedulerRecording: true + kubeStateMetrics: true + network: true + node: true + nodeExporterAlerting: true + nodeExporterRecording: true + prometheus: true + prometheusOperator: true + windows: true + + ## Reduce app namespace alert scope + appNamespacesTarget: ".*" + + ## Set keep_firing_for for all alerts + keepFiringFor: "" + + ## Labels for default rules + labels: {} + ## Annotations for default rules + annotations: {} + + ## Additional labels for PrometheusRule alerts + additionalRuleLabels: {} + + ## Additional annotations for 
PrometheusRule alerts + additionalRuleAnnotations: {} + + ## Additional labels for specific PrometheusRule alert groups + additionalRuleGroupLabels: + alertmanager: {} + etcd: {} + configReloaders: {} + general: {} + k8sContainerCpuUsageSecondsTotal: {} + k8sContainerMemoryCache: {} + k8sContainerMemoryRss: {} + k8sContainerMemorySwap: {} + k8sContainerResource: {} + k8sPodOwner: {} + kubeApiserverAvailability: {} + kubeApiserverBurnrate: {} + kubeApiserverHistogram: {} + kubeApiserverSlos: {} + kubeControllerManager: {} + kubelet: {} + kubeProxy: {} + kubePrometheusGeneral: {} + kubePrometheusNodeRecording: {} + kubernetesApps: {} + kubernetesResources: {} + kubernetesStorage: {} + kubernetesSystem: {} + kubeSchedulerAlerting: {} + kubeSchedulerRecording: {} + kubeStateMetrics: {} + network: {} + node: {} + nodeExporterAlerting: {} + nodeExporterRecording: {} + prometheus: {} + prometheusOperator: {} + + ## Additional annotations for specific PrometheusRule alerts groups + additionalRuleGroupAnnotations: + alertmanager: {} + etcd: {} + configReloaders: {} + general: {} + k8sContainerCpuUsageSecondsTotal: {} + k8sContainerMemoryCache: {} + k8sContainerMemoryRss: {} + k8sContainerMemorySwap: {} + k8sContainerResource: {} + k8sPodOwner: {} + kubeApiserverAvailability: {} + kubeApiserverBurnrate: {} + kubeApiserverHistogram: {} + kubeApiserverSlos: {} + kubeControllerManager: {} + kubelet: {} + kubeProxy: {} + kubePrometheusGeneral: {} + kubePrometheusNodeRecording: {} + kubernetesApps: {} + kubernetesResources: {} + kubernetesStorage: {} + kubernetesSystem: {} + kubeSchedulerAlerting: {} + kubeSchedulerRecording: {} + kubeStateMetrics: {} + network: {} + node: {} + nodeExporterAlerting: {} + nodeExporterRecording: {} + prometheus: {} + prometheusOperator: {} + + additionalAggregationLabels: [] + + ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules. + runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks" + + node: + fsSelector: 'fstype!=""' + # fsSelector: 'fstype=~"ext[234]|btrfs|xfs|zfs"' + + ## Disabled PrometheusRule alerts + disabled: {} + # KubeAPIDown: true + # NodeRAIDDegraded: true + +## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster. +## +# additionalPrometheusRules: [] +# - name: my-rule-file +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## Provide custom recording or alerting rules to be deployed into the cluster. 
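+## An illustrative entry (every name and expression below is an example, not a
+## chart default):
+## additionalPrometheusRulesMap:
+##   freeleaps-availability:
+##     groups:
+##       - name: freeleaps.rules
+##         rules:
+##           - alert: IngressControllerAbsent
+##             expr: absent(up{job=~".*ingress-nginx.*"})
+##             for: 5m
+##             labels:
+##               severity: critical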
+## +additionalPrometheusRulesMap: {} +# rule-name: +# groups: +# - name: my_group +# rules: +# - record: my_record +# expr: 100 * my_record + +## +global: + rbac: + create: true + + ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs + ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles + createAggregateClusterRoles: false + pspEnabled: false + pspAnnotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Global image registry to use if it needs to be overriden for some specific use cases (e.g local registries, custom images, ...) + ## + imageRegistry: "" + + ## Reference to one or more secrets to be used when pulling images + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + imagePullSecrets: [] + # - name: "image-pull-secret" + # or + # - "image-pull-secret" + +windowsMonitoring: + ## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter') + enabled: false + +## Configuration for prometheus-windows-exporter +## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter +## +prometheus-windows-exporter: + ## Enable ServiceMonitor and set Kubernetes label to use as a job label + ## + prometheus: + monitor: + enabled: true + jobLabel: jobLabel + + releaseLabel: true + + ## Set job label to 'windows-exporter' as required by the default Prometheus rules and Grafana dashboards + ## + podLabels: + jobLabel: windows-exporter + + ## Enable memory and container metrics as required by the default Prometheus rules and Grafana dashboards + ## + config: |- + collectors: + enabled: '[defaults],memory,container' + +## Configuration for alertmanager +## ref: https://prometheus.io/docs/alerting/alertmanager/ +## +alertmanager: + + ## Deploy alertmanager + ## + enabled: true + + ## Annotations for Alertmanager + ## + annotations: {} + + ## Api that prometheus will use to communicate with alertmanager. Possible values are v1, v2 + ## + apiVersion: v2 + + ## @param alertmanager.enableFeatures Enable access to Alertmanager disabled features. + ## + enableFeatures: [] + + ## Service account for Alertmanager to use. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + annotations: {} + automountServiceAccountToken: true + + ## Configure pod disruption budgets for Alertmanager + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + ## Alertmanager configuration directives + ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file + ## https://prometheus.io/webtools/alerting/routing-tree-editor/ + ## + config: + global: + resolve_timeout: 5m + inhibit_rules: + - source_matchers: + - 'severity = critical' + target_matchers: + - 'severity =~ warning|info' + equal: + - 'namespace' + - 'alertname' + - source_matchers: + - 'severity = warning' + target_matchers: + - 'severity = info' + equal: + - 'namespace' + - 'alertname' + - source_matchers: + - 'alertname = InfoInhibitor' + target_matchers: + - 'severity = info' + equal: + - 'namespace' + - target_matchers: + - 'alertname = InfoInhibitor' + route: + group_by: ['namespace'] + group_wait: 30s + group_interval: 5m + repeat_interval: 12h + receiver: 'null' + routes: + - receiver: 'null' + matchers: + - alertname = "Watchdog" + receivers: + - name: 'null' + templates: + - '/etc/alertmanager/config/*.tmpl' + + ## Alertmanager configuration directives (as string type, preferred over the config hash map) + ## stringConfig will be used only, if tplConfig is true + ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file + ## https://prometheus.io/webtools/alerting/routing-tree-editor/ + ## + stringConfig: "" + + ## Pass the Alertmanager configuration directives through Helm's templating + ## engine. If the Alertmanager configuration contains Alertmanager templates, + ## they'll need to be properly escaped so that they are not interpreted by + ## Helm + ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function + ## https://prometheus.io/docs/alerting/configuration/#tmpl_string + ## https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + tplConfig: false + + ## Alertmanager template files to format alerts + ## By default, templateFiles are placed in /etc/alertmanager/config/ and if + ## they have a .tmpl file suffix will be loaded. See config.templates above + ## to change, add other suffixes. If adding other suffixes, be sure to update + ## config.templates above to include those suffixes. + ## ref: https://prometheus.io/docs/alerting/notifications/ + ## https://prometheus.io/docs/alerting/notification_examples/ + ## + templateFiles: {} + # + ## An example template: + # template_1.tmpl: |- + # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }} + # + # {{ define "slack.myorg.text" }} + # {{- $root := . 
-}} + # {{ range .Alerts }} + # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}` + # *Cluster:* {{ template "cluster" $root }} + # *Description:* {{ .Annotations.description }} + # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:> + # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:> + # *Details:* + # {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}` + # {{ end }} + # {{ end }} + # {{ end }} + + ingress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + + labels: {} + + ## Override ingress to a different defined port on the service + # servicePort: 8081 + ## Override ingress to a different service then the default, this is useful if you need to + ## point to a specific instance of the alertmanager (eg kube-prometheus-stack-alertmanager-0) + # serviceName: kube-prometheus-stack-alertmanager-0 + + ## Hosts must be provided if Ingress is enabled. + ## + hosts: [] + # - alertmanager.domain.com + + ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Alertmanager Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: alertmanager-general-tls + # hosts: + # - alertmanager.example.com + + # -- BETA: Configure the gateway routes for the chart here. + # More routes can be added by adding a dictionary key like the 'main' route. + # Be aware that this is an early beta of this feature, + # kube-prometheus-stack does not guarantee this works and is subject to change. + # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk + # [[ref]](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1alpha2) + route: + main: + # -- Enables or disables the route + enabled: false + + # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2 + apiVersion: gateway.networking.k8s.io/v1 + # -- Set the route kind + # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute + kind: HTTPRoute + + annotations: {} + labels: {} + + hostnames: [] + # - my-filter.example.com + parentRefs: [] + # - name: acme-gw + + matches: + - path: + type: PathPrefix + value: / + + ## Filters define the filters that are applied to requests that match this rule. 
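+      ## An illustrative Gateway API filter (gateway.networking.k8s.io/v1);
+      ## purely an example, not a chart default:
+      ## filters:
+      ##   - type: ResponseHeaderModifier
+      ##     responseHeaderModifier:
+      ##       add:
+      ##         - name: X-Served-By
+      ##           value: alertmanager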
+ filters: [] + + ## Additional custom rules that can be added to the route + additionalRules: [] + + ## Configuration for Alertmanager secret + ## + secret: + annotations: {} + + ## Configuration for creating an Ingress that will map to each Alertmanager replica service + ## alertmanager.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## Secret name containing the TLS certificate for alertmanager per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: "" + + ## Separated secret for each per replica Ingress. Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "alertmanager" + + ## Configuration for Alertmanager service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## Port for Alertmanager Service to listen on + ## + port: 9093 + ## To be used with a proxy extraContainer port + ## + targetPort: 9093 + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30903 + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + + ## Additional ports to open for Alertmanager service + ## + additionalPorts: [] + # - name: oauth-proxy + # port: 8081 + # targetPort: 8081 + # - name: oauth-metrics + # port: 8082 + # targetPort: 8082 + + externalIPs: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## If you want to make sure that connections from a particular client are passed to the same Pod each time + ## Accepts 'ClientIP' or 'None' + ## + sessionAffinity: None + + ## If you want to modify the ClientIP sessionAffinity timeout + ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP" + ## + sessionAffinityConfig: + clientIP: + timeoutSeconds: 10800 + + ## Service type + ## + type: ClusterIP + + ## Configuration for creating a separate Service for each statefulset Alertmanager replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Alertmanager Service per replica to listen on + ## + port: 9093 + + ## To be used with a proxy extraContainer 
port + targetPort: 9093 + + ## Port to expose on each node + ## Only used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30904 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "LoadBalancer" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Configuration for creating a ServiceMonitor for AlertManager + ## + serviceMonitor: + ## If true, a ServiceMonitor will be created for the AlertManager service. + ## + selfMonitor: true + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Additional labels + ## + additionalLabels: {} + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. + scheme: "" + + ## enableHttp2: Whether to enable HTTP2. + ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint + enableHttp2: true + + ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS. + ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig + tlsConfig: {} + + bearerTokenFile: + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional Endpoints + ## + additionalEndpoints: [] + # - port: oauth-metrics + # path: /metrics + + ## Settings affecting alertmanagerSpec + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerspec + ## + alertmanagerSpec: + ## Statefulset's persistent volume claim retention policy + ## whenDeleted and whenScaled determine whether + ## statefulset's PVCs are deleted (true) or retained (false) + ## on scaling down and deleting statefulset, respectively. + ## Requires Kubernetes version 1.27.0+. 
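+ ## For example, to delete the PVCs when the StatefulSet is deleted but keep them
+ ## when scaling down (a sketch; the valid values are Retain and Delete):
+ # persistentVolumeClaimRetentionPolicy:
+ # whenDeleted: Delete
+ # whenScaled: Retain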
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
+ persistentVolumeClaimRetentionPolicy: {}
+ # whenDeleted: Retain
+ # whenScaled: Retain
+
+ ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
+ ## Metadata Labels and Annotations get propagated to the Alertmanager pods.
+ ##
+ podMetadata: {}
+
+ ## Image of Alertmanager
+ ##
+ image:
+ registry: quay.io
+ repository: prometheus/alertmanager
+ tag: v0.27.0
+ sha: ""
+
+ ## If true, the user is responsible for providing a secret with the Alertmanager configuration.
+ ## The config section above (including templateFiles) is then ignored and the secret's contents are used instead.
+ ##
+ useExistingSecret: false
+
+ ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
+ ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
+ ##
+ secrets: []
+
+ ## If false, the user will opt out of automounting API credentials.
+ ##
+ automountServiceAccountToken: true
+
+ ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
+ ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
+ ##
+ configMaps: []
+
+ ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
+ ## this Alertmanager instance. Defaults to 'alertmanager-<alertmanager-name>'. The secret is mounted into /etc/alertmanager/config.
+ ##
+ # configSecret:
+
+ ## WebTLSConfig defines the TLS parameters for HTTPS
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerwebspec
+ web: {}
+
+ ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
+ ##
+ alertmanagerConfigSelector: {}
+ ## Example which selects all alertmanagerConfig resources
+ ## with label "alertconfig" with values any of "example-config" or "example-config-2"
+ # alertmanagerConfigSelector:
+ # matchExpressions:
+ # - key: alertconfig
+ # operator: In
+ # values:
+ # - example-config
+ # - example-config-2
+ #
+ ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config"
+ # alertmanagerConfigSelector:
+ # matchLabels:
+ # role: example-config
+
+ ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace.
+ ##
+ alertmanagerConfigNamespaceSelector: {}
+ ## Example which selects all namespaces
+ ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2"
+ # alertmanagerConfigNamespaceSelector:
+ # matchExpressions:
+ # - key: alertmanagerconfig
+ # operator: In
+ # values:
+ # - example-namespace
+ # - example-namespace-2
+
+ ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled"
+ # alertmanagerConfigNamespaceSelector:
+ # matchLabels:
+ # alertmanagerconfig: enabled
+
+ ## AlertmanagerConfig to be used as the top-level configuration
+ ##
+ alertmanagerConfiguration: {}
+ ## Example selecting a global AlertmanagerConfig
+ # alertmanagerConfiguration:
+ # name: global-alertmanager-configuration
+
+ ## Defines the strategy used by AlertmanagerConfig objects to match alerts.
+ ##
+ alertmanagerConfigMatcherStrategy: {}
+ ## Example using the OnNamespace strategy
+ # alertmanagerConfigMatcherStrategy:
+ # type: OnNamespace
+
+ ## Define Log Format
+ # Use logfmt (default) or json logging
+ logFormat: logfmt
+
+ ## Log level for Alertmanager to be configured with.
+ ##
+ logLevel: info
+
+ ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
+ ## running cluster equal to the expected size.
+ replicas: 1
+
+ ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
+ ## [0-9]+(ms|s|m|h) (milliseconds, seconds, minutes, hours).
+ ##
+ retention: 120h
+
+ ## Storage is the definition of how storage will be used by the Alertmanager instances.
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md
+ ##
+ storage: {}
+ # volumeClaimTemplate:
+ # spec:
+ # storageClassName: gluster
+ # accessModes: ["ReadWriteOnce"]
+ # resources:
+ # requests:
+ # storage: 50Gi
+ # selector: {}
+
+ ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs
+ ## if Alertmanager is not served from the root of a DNS name.
+ ##
+ externalUrl:
+
+ ## The route prefix Alertmanager registers HTTP handlers for. This is useful if, while using ExternalURL, a proxy
+ ## rewrites the HTTP routes of a request and the server serves requests under a different route prefix.
+ ## For example, for use with kubectl proxy.
+ ##
+ routePrefix: /
+
+ ## scheme: HTTP scheme to use. Can be used with `tlsConfig` for example if using istio mTLS.
+ scheme: ""
+
+ ## tlsConfig: TLS configuration to use when connecting to the endpoint. For example if using istio mTLS.
+ ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
+ tlsConfig: {}
+
+ ## If set to true, no actions are performed on the underlying managed objects, except for delete actions.
+ ##
+ paused: false
+
+ ## Define which Nodes the Pods are scheduled on.
+ ## ref: https://kubernetes.io/docs/user-guide/node-selection/
+ ##
+ nodeSelector: {}
+
+ ## Define resources requests and limits for single Pods.
+ ## ref: https://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources: {}
+ # requests:
+ # memory: 400Mi
+
+ ## Pod anti-affinity can prevent the scheduler from placing Alertmanager replicas on the same node.
+ ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
+ ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
+ ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
+ ##
+ podAntiAffinity: "soft"
+
+ ## If anti-affinity is enabled, sets the topologyKey to use for anti-affinity.
+ ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone + ## + podAntiAffinityTopologyKey: kubernetes.io/hostname + + ## Assign custom affinity rules to the alertmanager instance + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + + ## If specified, the pod's tolerations. + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## If specified, the pod's topology spread constraints. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: alertmanager + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + seccompProfile: + type: RuntimeDefault + + ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP. + ## Note this is only for the Alertmanager UI, not the gossip communication. + ## + listenLocal: false + + ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod. + ## + containers: [] + # containers: + # - name: oauth-proxy + # image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1 + # args: + # - --upstream=http://127.0.0.1:9093 + # - --http-address=0.0.0.0:8081 + # - --metrics-address=0.0.0.0:8082 + # - ... + # ports: + # - containerPort: 8081 + # name: oauth-proxy + # protocol: TCP + # - containerPort: 8082 + # name: oauth-metrics + # protocol: TCP + # resources: {} + + # Additional volumes on the output StatefulSet definition. + volumes: [] + + # Additional VolumeMounts on the output StatefulSet definition. + volumeMounts: [] + + ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes + ## (permissions, dir tree) on mounted volumes before starting prometheus + initContainers: [] + + ## Priority class assigned to the Pods + ## + priorityClassName: "" + + ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster. + ## + additionalPeers: [] + + ## PortName to use for Alert Manager. + ## + portName: "http-web" + + ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918 + ## + clusterAdvertiseAddress: false + + ## clusterGossipInterval determines interval between gossip attempts. + ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s) + clusterGossipInterval: "" + + ## clusterPeerTimeout determines timeout for cluster peering. 
+ ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
+ clusterPeerTimeout: ""
+
+ ## clusterPushpullInterval determines interval between pushpull attempts.
+ ## Needs to be specified as GoDuration, a time duration that can be parsed by Go’s time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
+ clusterPushpullInterval: ""
+
+ ## clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster.
+ clusterLabel: ""
+
+ ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
+ ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
+ forceEnableClusterMode: false
+
+ ## Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing for it to
+ ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
+ minReadySeconds: 0
+
+ ## Additional configuration which is not covered by the properties above. (passed through tpl)
+ additionalConfig: {}
+
+ ## Additional configuration which is not covered by the properties above.
+ ## Useful if you need advanced templating inside alertmanagerSpec.
+ ## Otherwise, use alertmanager.alertmanagerSpec.additionalConfig (passed through tpl)
+ additionalConfigString: ""
+
+ ## ExtraSecret can be used to store various data in an extra secret
+ ## (use it for example to store hashed basic auth credentials)
+ extraSecret:
+ ## if not set, name will be auto generated
+ # name: ""
+ annotations: {}
+ data: {}
+ # auth: |
+ # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
+ # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
+
+ ## Using default values from https://github.com/grafana/helm-charts/blob/main/charts/grafana/values.yaml
+ ##
+ grafana:
+ enabled: true
+ namespaceOverride: ""
+
+ ## ForceDeployDatasources: create the datasource configmap even if the grafana deployment has been disabled
+ ##
+ forceDeployDatasources: false
+
+ ## ForceDeployDashboards: create the dashboard configmap even if the grafana deployment has been disabled
+ ##
+ forceDeployDashboards: false
+
+ ## Deploy default dashboards
+ ##
+ defaultDashboardsEnabled: true
+
+ ## Timezone for the default dashboards
+ ## Other options are: browser or a specific timezone, e.g. Europe/Luxembourg
+ ##
+ defaultDashboardsTimezone: browser
+
+ ## Editable flag for the default dashboards
+ ##
+ defaultDashboardsEditable: true
+
+ adminPassword: r6Y@QTb*7BQN@hDGsN
+
+ rbac:
+ ## If true, Grafana PSPs will be created
+ ##
+ pspEnabled: false
+
+ ingress:
+ ## If true, Grafana Ingress will be created
+ ##
+ enabled: false
+
+ ## IngressClassName for Grafana Ingress.
+ ## Should be provided if Ingress is enabled.
+ ##
+ # ingressClassName: nginx
+
+ ## Annotations for Grafana Ingress
+ ##
+ annotations: {}
+ # kubernetes.io/ingress.class: nginx
+ # kubernetes.io/tls-acme: "true"
+
+ ## Labels to be added to the Ingress
+ ##
+ labels: {}
+
+ ## Hostnames.
+ ## Must be provided if Ingress is enabled.
+ ## + # hosts: + # - grafana.domain.com + hosts: [] + + ## Path for grafana ingress + path: / + + ## TLS configuration for grafana Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: grafana-general-tls + # hosts: + # - grafana.example.com + + # # To make Grafana persistent (Using Statefulset) + # # + # persistence: + # enabled: true + # type: sts + # storageClassName: "storageClassName" + # accessModes: + # - ReadWriteOnce + # size: 20Gi + # finalizers: + # - kubernetes.io/pvc-protection + + serviceAccount: + create: true + autoMount: true + + sidecar: + dashboards: + enabled: true + label: grafana_dashboard + labelValue: "1" + # Allow discovery in all namespaces for dashboards + searchNamespace: ALL + + # Support for new table panels, when enabled grafana auto migrates the old table panels to newer table panels + enableNewTablePanelSyntax: false + + ## Annotations for Grafana dashboard configmaps + ## + annotations: {} + multicluster: + global: + enabled: false + etcd: + enabled: false + provider: + allowUiUpdates: false + datasources: + enabled: true + defaultDatasourceEnabled: true + isDefaultDatasource: true + + name: Prometheus + uid: prometheus + + ## URL of prometheus datasource + ## + # url: http://prometheus-stack-prometheus:9090/ + + ## Prometheus request timeout in seconds + # timeout: 30 + + # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default + # defaultDatasourceScrapeInterval: 15s + + ## Annotations for Grafana datasource configmaps + ## + annotations: {} + + ## Set method for HTTP to send query to datasource + httpMethod: POST + + ## Create datasource for each Pod of Prometheus StatefulSet; + ## this uses headless service `prometheus-operated` which is + ## created by Prometheus Operator + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286 + createPrometheusReplicasDatasources: false + label: grafana_datasource + labelValue: "1" + + ## Field with internal link pointing to existing data source in Grafana. + ## Can be provisioned via additionalDataSources + exemplarTraceIdDestinations: {} + # datasourceUid: Jaeger + # traceIdLabelName: trace_id + alertmanager: + enabled: true + name: Alertmanager + uid: alertmanager + handleGrafanaManagedAlerts: false + implementation: prometheus + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # configMap: certs-configmap + # readOnly: true + + deleteDatasources: [] + # - name: example-datasource + # orgId: 1 + + ## Configure additional grafana datasources (passed through tpl) + ## ref: http://docs.grafana.org/administration/provisioning/#datasources + additionalDataSources: [] + # - name: prometheus-sample + # access: proxy + # basicAuth: true + # secureJsonData: + # basicAuthPassword: pass + # basicAuthUser: daco + # editable: false + # jsonData: + # tlsSkipVerify: true + # orgId: 1 + # type: prometheus + # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090 + # version: 1 + + # Flag to mark provisioned data sources for deletion if they are no longer configured. + # It takes no effect if data sources are already listed in the deleteDatasources section. 
+ # ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#example-data-source-config-file + prune: false + + ## Passed to grafana subchart and used by servicemonitor below + ## + service: + portName: http-web + ipFamilies: [] + ipFamilyPolicy: "" + + serviceMonitor: + # If true, a ServiceMonitor CRD is created for a prometheus operator + # https://github.com/coreos/prometheus-operator + # + enabled: true + + # Path to use for scraping metrics. Might be different if server.root_url is set + # in grafana.ini + path: "/metrics" + + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + + # labels for the ServiceMonitor + labels: {} + + # Scrape interval. If not set, the Prometheus default scrape interval is used. + # + interval: "" + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + +## Flag to disable all the kubernetes component scrapers +## +kubernetesServiceMonitors: + enabled: true + +## Component scraping the kube api server +## +kubeApiServer: + enabled: true + tlsConfig: + serverName: kubernetes + insecureSkipVerify: false + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + jobLabel: component + selector: + matchLabels: + component: apiserver + provider: kubernetes + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: + # Drop excessively noisy apiserver buckets. 
+ - action: drop + regex: apiserver_request_duration_seconds_bucket;(0.15|0.2|0.3|0.35|0.4|0.45|0.6|0.7|0.8|0.9|1.25|1.5|1.75|2|3|3.5|4|4.5|6|7|8|9|15|25|40|50) + sourceLabels: + - __name__ + - le + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: + # - __meta_kubernetes_namespace + # - __meta_kubernetes_service_name + # - __meta_kubernetes_endpoint_port_name + # action: keep + # regex: default;kubernetes;https + # - targetLabel: __address__ + # replacement: kubernetes.default.svc:443 + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + + ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + targetLabels: [] + +## Component scraping the kubelet and kubelet-hosted cAdvisor +## +kubelet: + enabled: true + namespace: kube-system + + serviceMonitor: + ## Attach metadata to discovered targets. Requires Prometheus v2.45 for endpoints created by the operator. + ## + attachMetadata: + node: false + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## If true, Prometheus use (respect) labels provided by exporter. + ## + honorLabels: true + + ## If true, Prometheus ingests metrics with timestamp provided by exporter. If false, Prometheus ingests metrics with timestamp of scrape. + ## + honorTimestamps: true + + ## If true, defines whether Prometheus tracks staleness of the metrics that have an explicit timestamp present in scraped data. Has no effect if `honorTimestamps` is false. + ## We recommend enabling this if you want the best possible accuracy for container_ metrics scraped from cadvisor. + ## For more details see: https://github.com/prometheus-community/helm-charts/pull/5063#issuecomment-2545374849 + trackTimestampsStaleness: true + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## Enable scraping the kubelet over https. For requirements to enable this see + ## https://github.com/prometheus-operator/prometheus-operator/issues/926 + ## + https: true + + ## Skip TLS certificate validation when scraping. 
+ ## This is enabled by default because kubelet serving certificate deployed by kubeadm is by default self-signed + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs + ## + insecureSkipVerify: true + + ## Enable scraping /metrics/probes from kubelet's service + ## + probes: true + + ## Enable scraping /metrics/resource from kubelet's service + ## This is disabled by default because container metrics are already exposed by cAdvisor + ## + resource: false + # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource + resourcePath: "/metrics/resource/v1alpha1" + ## Configure the scrape interval for resource metrics. This is configured to the default Kubelet cAdvisor + ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored + ## if kubelet.serviceMonitor.interval is not empty. + resourceInterval: 10s + + ## Enable scraping /metrics/cadvisor from kubelet's service + ## + cAdvisor: true + ## Configure the scrape interval for cAdvisor. This is configured to the default Kubelet cAdvisor + ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored + ## if kubelet.serviceMonitor.interval is not empty. + cAdvisorInterval: 10s + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + cAdvisorMetricRelabelings: + # Drop less useful container CPU metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)' + # Drop less useful container / always zero filesystem metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)' + # Drop less useful / always zero container memory metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_memory_(mapped_file|swap)' + # Drop less useful container process metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_(file_descriptors|tasks_state|threads_max)' + # Drop container spec metrics that overlap with kube-state-metrics. + - sourceLabels: [__name__] + action: drop + regex: 'container_spec.*' + # Drop cgroup metrics with no pod. + - sourceLabels: [id, pod] + action: drop + regex: '.+;' + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + probesMetricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + ## metrics_path is required to match upstream rules and charts + cAdvisorRelabelings: + - action: replace + sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + probesRelabelings: + - action: replace + sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + resourceRelabelings: + - action: replace + sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - sourceLabels: [__name__, image] + # separator: ; + # regex: container_([a-z_]+); + # replacement: $1 + # action: drop + # - sourceLabels: [__name__] + # separator: ; + # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s) + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + ## metrics_path is required to match upstream rules and charts + relabelings: + - action: replace + sourceLabels: [__metrics_path__] + targetLabel: metrics_path + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + + ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. 
+ ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + targetLabels: [] + +## Component scraping the kube controller manager +## +kubeControllerManager: + enabled: true + + ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeControllerManager.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.22. + ## + port: null + targetPort: null + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # component: kube-controller-manager + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # component: kube-controller-manager + + ## Enable scraping kube-controller-manager over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. + ## + https: null + + # Skip TLS certificate validation when scraping + insecureSkipVerify: null + + # Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + + ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + targetLabels: [] + +## Component scraping coreDns. 
Use either this or kubeDns +## +coreDns: + enabled: true + service: + enabled: true + port: 9153 + targetPort: 9153 + + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # k8s-app: kube-dns + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # k8s-app: kube-dns + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + + ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + targetLabels: [] + +## Component scraping kubeDns. Use either this or coreDns +## +kubeDns: + enabled: false + service: + dnsmasq: + port: 10054 + targetPort: 10054 + skydns: + port: 10055 + targetPort: 10055 + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # k8s-app: kube-dns + serviceMonitor: + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. 
+ ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + jobLabel: jobLabel + selector: {} + # matchLabels: + # k8s-app: kube-dns + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqMetricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + dnsmasqRelabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + + ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + targetLabels: [] + +## Component scraping etcd +## +kubeEtcd: + enabled: true + + ## If your etcd is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used + ## + service: + enabled: true + port: 2381 + targetPort: 2381 + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # component: etcd + + ## Configure secure access to the etcd cluster by loading a secret into prometheus and + ## specifying security configuration below. For example, with a secret named etcd-client-cert + ## + ## serviceMonitor: + ## scheme: https + ## insecureSkipVerify: false + ## serverName: localhost + ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client + ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + ## + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. 
Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + scheme: http + insecureSkipVerify: false + serverName: "" + caFile: "" + certFile: "" + keyFile: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # component: etcd + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + + ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + targetLabels: [] + +## Component scraping kube scheduler +## +kubeScheduler: + enabled: true + + ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + ## If using kubeScheduler.endpoints only the port and targetPort are used + ## + service: + enabled: true + ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change + ## of default port in Kubernetes 1.23. + ## + port: null + targetPort: null + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # component: kube-scheduler + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + ## Enable scraping kube-scheduler over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks. + ## If null or unset, the value is determined dynamically based on target Kubernetes version. 
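+ ## For example, to force HTTPS scraping against a scheduler that serves its
+ ## metrics with a self-signed certificate (illustrative):
+ # https: true
+ # insecureSkipVerify: true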
+ ## + https: null + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # component: kube-scheduler + + ## Skip TLS certificate validation when scraping + insecureSkipVerify: null + + ## Name of the server to use when validating TLS certificate + serverName: null + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + + ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + targetLabels: [] + +## Component scraping kube proxy +## +kubeProxy: + enabled: true + + ## If your kube proxy is not deployed as a pod, specify IPs it can be found on + ## + endpoints: [] + # - 10.141.4.22 + # - 10.141.4.23 + # - 10.141.4.24 + + service: + enabled: true + port: 10249 + targetPort: 10249 + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + # selector: + # k8s-app: kube-proxy + + serviceMonitor: + enabled: true + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## port: Name of the port the metrics will be scraped from + ## + port: http-metrics + + jobLabel: jobLabel + selector: {} + # matchLabels: + # k8s-app: kube-proxy + + ## Enable scraping kube-proxy over https. + ## Requires proper certs (not self-signed) and delegated authentication/authorization checks + ## + https: false + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## Additional labels + ## + additionalLabels: {} + # foo: bar + + ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitor + targetLabels: [] + +## Component scraping kube state metrics +## +kubeStateMetrics: + enabled: true + +## Configuration for kube-state-metrics subchart +## +kube-state-metrics: + namespaceOverride: "" + rbac: + create: true + releaseLabel: true + prometheus: + monitor: + enabled: true + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## Scrape Timeout. If not set, the Prometheus default scrape timeout is used. + ## + scrapeTimeout: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + # Keep labels from scraped data, overriding server-side labels + ## + honorLabels: true + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + selfMonitor: + enabled: false + +## Deploy node exporter as a daemonset to all nodes +## +nodeExporter: + enabled: true + operatingSystems: + linux: + enabled: true + aix: + enabled: true + darwin: + enabled: true + + ## ForceDeployDashboard Create dashboard configmap even if nodeExporter deployment has been disabled + ## + forceDeployDashboards: false + +## Configuration for prometheus-node-exporter subchart +## +prometheus-node-exporter: + namespaceOverride: "" + podLabels: + ## Add the 'node-exporter' label to be used by serviceMonitor to match standard common usage in rules and grafana dashboards + ## + jobLabel: node-exporter + releaseLabel: true + extraArgs: + - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|var/lib/docker/.+|var/lib/kubelet/.+)($|/) + - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs)$ + service: + portName: http-metrics + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + labels: + jobLabel: node-exporter + + prometheus: + monitor: + enabled: true + + jobLabel: jobLabel + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## How long until a scrape request times out. If not set, the Prometheus default scape timeout is used. + ## + scrapeTimeout: "" + + ## proxyUrl: URL of a proxy that should be used for scraping. + ## + proxyUrl: "" + + ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion. 
+ ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + # - sourceLabels: [__name__] + # separator: ; + # regex: ^node_mountstats_nfs_(event|operations|transport)_.+ + # replacement: $1 + # action: drop + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#relabelconfig + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above. + ## + # attachMetadata: + # node: false + + rbac: + ## If true, create PSPs for node-exporter + ## + pspEnabled: false + +## Manages Prometheus and Alertmanager components +## +prometheusOperator: + enabled: true + + ## Use '{{ template "kube-prometheus-stack.fullname" . }}-operator' by default + fullnameOverride: "" + + ## Number of old replicasets to retain ## + ## The default value is 10, 0 will garbage-collect old replicasets ## + revisionHistoryLimit: 10 + + ## Strategy of the deployment + ## + strategy: {} + + ## Prometheus-Operator v0.39.0 and later support TLS natively. + ## + tls: + enabled: true + # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants + tlsMinVersion: VersionTLS13 + # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules. + internalPort: 10250 + + ## Liveness probe for the prometheusOperator deployment + ## + livenessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + ## Readiness probe for the prometheusOperator deployment + ## + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 0 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted + ## rules from making their way into prometheus and potentially preventing the container from starting + admissionWebhooks: + ## Valid values: Fail, Ignore, IgnoreOnInstallOnly + ## IgnoreOnInstallOnly - If Release.IsInstall returns "true", set "Ignore" otherwise "Fail" + failurePolicy: "" + ## The default timeoutSeconds is 10 and the maximum value is 30. + timeoutSeconds: 10 + enabled: true + ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate. + ## If unspecified, system trust roots on the apiserver are used. + caBundle: "" + ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data. + ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own + ## certs ahead of time if you wish. 
+    ##
+    annotations: {}
+    #   argocd.argoproj.io/hook: PreSync
+    #   argocd.argoproj.io/hook-delete-policy: HookSucceeded
+
+    namespaceSelector: {}
+    objectSelector: {}
+
+    mutatingWebhookConfiguration:
+      annotations: {}
+      #   argocd.argoproj.io/hook: PreSync
+
+    validatingWebhookConfiguration:
+      annotations: {}
+      #   argocd.argoproj.io/hook: PreSync
+
+    deployment:
+      enabled: false
+
+      ## Number of replicas
+      ##
+      replicas: 1
+
+      ## Strategy of the deployment
+      ##
+      strategy: {}
+
+      # Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
+      podDisruptionBudget: {}
+      #   maxUnavailable: 1
+      #   minAvailable: 1
+
+      ## Number of old replicasets to retain ##
+      ## The default value is 10, 0 will garbage-collect old replicasets ##
+      revisionHistoryLimit: 10
+
+      ## Prometheus-Operator v0.39.0 and later support TLS natively.
+      ##
+      tls:
+        enabled: true
+        # Value must match version names from https://golang.org/pkg/crypto/tls/#pkg-constants
+        tlsMinVersion: VersionTLS13
+        # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
+        internalPort: 10250
+
+      ## Service account for Prometheus Operator Webhook to use.
+      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+      ##
+      serviceAccount:
+        annotations: {}
+        automountServiceAccountToken: false
+        create: true
+        name: ""
+
+      ## Configuration for Prometheus operator Webhook service
+      ##
+      service:
+        annotations: {}
+        labels: {}
+        clusterIP: ""
+        ipDualStack:
+          enabled: false
+          ipFamilies: ["IPv6", "IPv4"]
+          ipFamilyPolicy: "PreferDualStack"
+
+        ## Port to expose on each node
+        ## Only used if service.type is 'NodePort'
+        ##
+        nodePort: 31080
+
+        nodePortTls: 31443
+
+        ## Additional ports to open for Prometheus operator Webhook service
+        ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
+        ##
+        additionalPorts: []
+
+        ## Loadbalancer IP
+        ## Only use if service.type is "LoadBalancer"
+        ##
+        loadBalancerIP: ""
+        loadBalancerSourceRanges: []
+
+        ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+        ##
+        externalTrafficPolicy: Cluster
+
+        ## Service type
+        ## NodePort, ClusterIP, LoadBalancer
+        ##
+        type: ClusterIP
+
+        ## List of IP addresses at which the Prometheus server service is available
+        ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips
+        ##
+        externalIPs: []
+
+      # ## Labels to add to the operator webhook deployment
+      # ##
+      labels: {}
+
+      ## Annotations to add to the operator webhook deployment
+      ##
+      annotations: {}
+
+      ## Labels to add to the operator webhook pod
+      ##
+      podLabels: {}
+
+      ## Annotations to add to the operator webhook pod
+      ##
+      podAnnotations: {}
+
+      ## Assign a PriorityClassName to pods if set
+      # priorityClassName: ""
+
+      ## Define Log Format
+      # Use logfmt (default) or json logging
+      # logFormat: logfmt
+
+      ## Decrease log verbosity to errors only
+      # logLevel: error
+
+      ## Prometheus-operator webhook image
+      ##
+      image:
+        registry: quay.io
+        repository: prometheus-operator/admission-webhook
+        # if not set appVersion field from Chart.yaml is used
+        tag: ""
+        sha: ""
+        pullPolicy: IfNotPresent
+
+      ## Liveness probe
+      ##
+      livenessProbe:
+        enabled: true
+        failureThreshold: 3
+        initialDelaySeconds: 30
+        periodSeconds: 10
+        successThreshold: 1
+        timeoutSeconds: 1
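+
+      ## The probe values above are templated into the webhook container's probe.
+      ## A hedged sketch for slow-starting clusters (numbers are illustrative,
+      ## not a recommendation):
+      # livenessProbe:
+      #   enabled: true
+      #   initialDelaySeconds: 60
+      #   failureThreshold: 5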
+ + ## Readiness probe + ## + readinessProbe: + enabled: true + failureThreshold: 3 + initialDelaySeconds: 5 + periodSeconds: 10 + successThreshold: 1 + timeoutSeconds: 1 + + ## Resource limits & requests + ## + resources: {} + # limits: + # cpu: 200m + # memory: 200Mi + # requests: + # cpu: 100m + # memory: 100Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## Assign custom affinity rules to the prometheus operator + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + seccompProfile: + type: RuntimeDefault + + ## Container-specific security context configuration + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + containerSecurityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + ## If false then the user will opt out of automounting API credentials. + ## + automountServiceAccountToken: true + + patch: + enabled: true + image: + registry: registry.k8s.io + repository: ingress-nginx/kube-webhook-certgen + tag: v20221220-controller-v1.5.1-58-g787ea74b6 + sha: "" + pullPolicy: IfNotPresent + resources: {} + ## Provide a priority class name to the webhook patching job + ## + priorityClassName: "" + ttlSecondsAfterFinished: 60 + annotations: {} + # argocd.argoproj.io/hook: PreSync + # argocd.argoproj.io/hook-delete-policy: HookSucceeded + podAnnotations: {} + nodeSelector: {} + affinity: {} + tolerations: [] + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 2000 + seccompProfile: + type: RuntimeDefault + ## Service account for Prometheus Operator Webhook Job Patch to use. 
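+      ## For example, cloud IAM annotations can be attached here as on any other
+      ## service account (a hedged sketch; the role ARN below is a placeholder):
+      # serviceAccount:
+      #   create: true
+      #   annotations:
+      #     eks.amazonaws.com/role-arn: arn:aws:iam::111122223333:role/webhook-certgen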
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + annotations: {} + automountServiceAccountToken: true + + # Security context for create job container + createSecretJob: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + # Security context for patch job container + patchWebhookJob: + securityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + # Use certmanager to generate webhook certs + certManager: + enabled: false + # self-signed root certificate + rootCert: + duration: "" # default to be 5y + admissionCert: + duration: "" # default to be 1y + # issuerRef: + # name: "issuer" + # kind: "ClusterIssuer" + + ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list). + ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration + ## + namespaces: {} + # releaseNamespace: true + # additional: + # - kube-system + + ## Namespaces not to scope the interaction of the Prometheus Operator (deny list). + ## + denyNamespaces: [] + + ## Filter namespaces to look for prometheus-operator custom resources + ## + alertmanagerInstanceNamespaces: [] + alertmanagerConfigNamespaces: [] + prometheusInstanceNamespaces: [] + thanosRulerInstanceNamespaces: [] + + ## The clusterDomain value will be added to the cluster.peer option of the alertmanager. + ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value) + ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094 + ## + # clusterDomain: "cluster.local" + + networkPolicy: + ## Enable creation of NetworkPolicy resources. + ## + enabled: false + + ## Flavor of the network policy to use. + # Can be: + # * kubernetes for networking.k8s.io/v1/NetworkPolicy + # * cilium for cilium.io/v2/CiliumNetworkPolicy + flavor: kubernetes + + # cilium: + # egress: + + ## match labels used in selector + # matchLabels: {} + + ## Service account for Prometheus Operator to use. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + annotations: {} + + # -- terminationGracePeriodSeconds for container lifecycle hook + terminationGracePeriodSeconds: 30 + # -- Specify lifecycle hooks for the controller + lifecycle: {} + ## Configuration for Prometheus operator service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30080 + + nodePortTls: 30443 + + ## Additional ports to open for Prometheus operator service + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services + ## + additionalPorts: [] + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + ## + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## NodePort, ClusterIP, LoadBalancer + ## + type: ClusterIP + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + # ## Labels to add to the operator deployment + # ## + labels: {} + + ## Annotations to add to the operator deployment + ## + annotations: {} + + ## Labels to add to the operator pod + ## + podLabels: {} + + ## Annotations to add to the operator pod + ## + podAnnotations: {} + + ## Assign a PriorityClassName to pods if set + # priorityClassName: "" + + ## Define Log Format + # Use logfmt (default) or json logging + # logFormat: logfmt + + ## Decrease log verbosity to errors only + # logLevel: error + + kubeletService: + ## If true, the operator will create and maintain a service for scraping kubelets + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md + ## + enabled: true + namespace: kube-system + selector: "" + ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default + name: "" + + ## Create Endpoints objects for kubelet targets. + kubeletEndpointsEnabled: true + ## Create EndpointSlice objects for kubelet targets. + kubeletEndpointSliceEnabled: false + + ## Extra arguments to pass to prometheusOperator + # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/operator.md + extraArgs: [] + # - --labels="cluster=talos-cluster" + + ## Create a servicemonitor for the operator + ## + serviceMonitor: + ## If true, create a serviceMonitor for prometheus operator + ## + selfMonitor: true + + ## Labels for ServiceMonitor + additionalLabels: {} + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. 
+ ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## Scrape timeout. If not set, the Prometheus default scrape timeout is used. + scrapeTimeout: "" + + ## Metric relabel configs to apply to samples before ingestion. + ## + metricRelabelings: [] + # - action: keep + # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+' + # sourceLabels: [__name__] + + # relabel configs to apply to samples before ingestion. + ## + relabelings: [] + # - sourceLabels: [__meta_kubernetes_pod_node_name] + # separator: ; + # regex: ^(.*)$ + # targetLabel: nodename + # replacement: $1 + # action: replace + + ## Resource limits & requests + ## + resources: {} + # limits: + # cpu: 200m + # memory: 200Mi + # requests: + # cpu: 100m + # memory: 100Mi + + ## Operator Environment + ## env: + ## VARIABLE: value + env: + GOGC: "30" + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## Assign custom affinity rules to the prometheus operator + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + seccompProfile: + type: RuntimeDefault + + ## Container-specific security context configuration + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + containerSecurityContext: + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + capabilities: + drop: + - ALL + + # Enable vertical pod autoscaler support for prometheus-operator + verticalPodAutoscaler: + enabled: false + + # Recommender responsible for generating recommendation for the object. + # List should be empty (then the default recommender will generate the recommendation) + # or contain exactly one recommender. + # recommenders: + # - name: custom-recommender-performance + + # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + controlledResources: [] + # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits. 
+ # controlledValues: RequestsAndLimits + + # Define the max allowed resources for the pod + maxAllowed: {} + # cpu: 200m + # memory: 100Mi + # Define the min allowed resources for the pod + minAllowed: {} + # cpu: 200m + # memory: 100Mi + + updatePolicy: + # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction + # minReplicas: 1 + # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates + # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "Auto". + updateMode: Auto + + ## Prometheus-operator image + ## + image: + registry: quay.io + repository: prometheus-operator/prometheus-operator + # if not set appVersion field from Chart.yaml is used + tag: "" + sha: "" + pullPolicy: IfNotPresent + + ## Prometheus image to use for prometheuses managed by the operator + ## + # prometheusDefaultBaseImage: prometheus/prometheus + + ## Prometheus image registry to use for prometheuses managed by the operator + ## + # prometheusDefaultBaseImageRegistry: quay.io + + ## Alertmanager image to use for alertmanagers managed by the operator + ## + # alertmanagerDefaultBaseImage: prometheus/alertmanager + + ## Alertmanager image registry to use for alertmanagers managed by the operator + ## + # alertmanagerDefaultBaseImageRegistry: quay.io + + ## Prometheus-config-reloader + ## + prometheusConfigReloader: + image: + registry: quay.io + repository: prometheus-operator/prometheus-config-reloader + # if not set appVersion field from Chart.yaml is used + tag: "" + sha: "" + + # add prometheus config reloader liveness and readiness probe. Default: false + enableProbe: false + + # resource config for prometheusConfigReloader + resources: {} + # requests: + # cpu: 200m + # memory: 50Mi + # limits: + # cpu: 200m + # memory: 50Mi + + ## Thanos side-car image when configured + ## + thanosImage: + registry: quay.io + repository: thanos/thanos + tag: v0.37.2 + sha: "" + + ## Set a Label Selector to filter watched prometheus and prometheusAgent + ## + prometheusInstanceSelector: "" + + ## Set a Label Selector to filter watched alertmanager + ## + alertmanagerInstanceSelector: "" + + ## Set a Label Selector to filter watched thanosRuler + thanosRulerInstanceSelector: "" + + ## Set a Field Selector to filter watched secrets + ## + secretFieldSelector: "type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1" + + ## If false then the user will opt out of automounting API credentials. + ## + automountServiceAccountToken: true + + ## Additional volumes + ## + extraVolumes: [] + + ## Additional volume mounts + ## + extraVolumeMounts: [] + +## Deploy a Prometheus instance +## +prometheus: + enabled: true + + ## Toggle prometheus into agent mode + ## Note many of features described below (e.g. rules, query, alerting, remote read, thanos) will not work in agent mode. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/designs/prometheus-agent.md + ## + agentMode: false + + ## Annotations for Prometheus + ## + annotations: {} + + ## Configure network policy for the prometheus + networkPolicy: + enabled: false + + ## Flavor of the network policy to use. 
+    # Can be:
+    # * kubernetes for networking.k8s.io/v1/NetworkPolicy
+    # * cilium for cilium.io/v2/CiliumNetworkPolicy
+    flavor: kubernetes
+
+    # cilium:
+    #   endpointSelector:
+    #   egress:
+    #   ingress:
+
+    # egress:
+    # - {}
+    # ingress:
+    # - {}
+    # podSelector:
+    #   matchLabels:
+    #     app: prometheus
+
+  ## Service account for Prometheuses to use.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+  ##
+  serviceAccount:
+    create: true
+    name: ""
+    annotations: {}
+    automountServiceAccountToken: true
+
+  # Service for Thanos service discovery on the sidecar.
+  # Enabling this lets Thanos Query use
+  # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discover
+  # the Thanos sidecar on Prometheus nodes.
+  # (Remember to substitute ${kube-prometheus-stack.fullname} and ${namespace}; do not copy and paste verbatim!)
+  thanosService:
+    enabled: false
+    annotations: {}
+    labels: {}
+
+    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
+    ##
+    externalTrafficPolicy: Cluster
+
+    ## Service type
+    ##
+    type: ClusterIP
+
+    ## Service dual stack
+    ##
+    ipDualStack:
+      enabled: false
+      ipFamilies: ["IPv6", "IPv4"]
+      ipFamilyPolicy: "PreferDualStack"
+
+    ## gRPC port config
+    portName: grpc
+    port: 10901
+    targetPort: "grpc"
+
+    ## HTTP port config (for metrics)
+    httpPortName: http
+    httpPort: 10902
+    targetHttpPort: "http"
+
+    ## ClusterIP to assign
+    # Default is to make this a headless service ("None")
+    clusterIP: "None"
+
+    ## Port to expose on each node, if service type is NodePort
+    ##
+    nodePort: 30901
+    httpNodePort: 30902
+
+  # ServiceMonitor to scrape Sidecar metrics
+  # Needs thanosService to be enabled as well
+  thanosServiceMonitor:
+    enabled: false
+    interval: ""
+
+    ## Additional labels
+    ##
+    additionalLabels: {}
+
+    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
+    scheme: ""
+
+    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+    ## Of type: https://github.com/coreos/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
+    tlsConfig: {}
+
+    bearerTokenFile:
+
+    ## Metric relabel configs to apply to samples before ingestion.
+    metricRelabelings: []
+
+    ## relabel configs to apply to samples before ingestion.
+    relabelings: []
+
+  # Service for external access to sidecar
+  # Enabling this creates a service to expose thanos-sidecar outside the cluster.
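+  # The sidecar's gRPC StoreAPI has no authentication of its own, so if this
+  # service is enabled it is worth restricting who can reach it. A hedged
+  # sketch (the CIDR below is a placeholder):
+  # thanosServiceExternal:
+  #   enabled: true
+  #   loadBalancerSourceRanges:
+  #     - 203.0.113.0/24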
+ thanosServiceExternal: + enabled: false + annotations: {} + labels: {} + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## gRPC port config + portName: grpc + port: 10901 + targetPort: "grpc" + + ## HTTP port config (for metrics) + httpPortName: http + httpPort: 10902 + targetHttpPort: "http" + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: LoadBalancer + + ## Port to expose on each node + ## + nodePort: 30901 + httpNodePort: 30902 + + ## Configuration for Prometheus service + ## + service: + annotations: {} + labels: {} + clusterIP: "" + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## Port for Prometheus Service to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 9090 + + ## Port for Prometheus Reloader to listen on + ## + reloaderWebPort: 8080 + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30090 + + ## Loadbalancer IP + ## Only use if service.type is "LoadBalancer" + loadBalancerIP: "" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Additional ports to open for Prometheus service + ## + additionalPorts: [] + # additionalPorts: + # - name: oauth-proxy + # port: 8081 + # targetPort: 8081 + # - name: oauth-metrics + # port: 8082 + # targetPort: 8082 + + ## Consider that all endpoints are considered "ready" even if the Pods themselves are not + ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec + publishNotReadyAddresses: false + + ## If you want to make sure that connections from a particular client are passed to the same Pod each time + ## Accepts 'ClientIP' or 'None' + ## + sessionAffinity: None + + ## If you want to modify the ClientIP sessionAffinity timeout + ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP" + ## + sessionAffinityConfig: + clientIP: + timeoutSeconds: 10800 + + ## Configuration for creating a separate Service for each statefulset Prometheus replica + ## + servicePerReplica: + enabled: false + annotations: {} + + ## Port for Prometheus Service per replica to listen on + ## + port: 9090 + + ## To be used with a proxy extraContainer port + targetPort: 9090 + + ## Port to expose on each node + ## Only used if servicePerReplica.type is 'NodePort' + ## + nodePort: 30091 + + ## Loadbalancer source IP ranges + ## Only used if servicePerReplica.type is "LoadBalancer" + loadBalancerSourceRanges: [] + + ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints + ## + externalTrafficPolicy: Cluster + + ## Service type + ## + type: ClusterIP + + ## Service dual stack + ## + ipDualStack: + enabled: false + ipFamilies: ["IPv6", "IPv4"] + ipFamilyPolicy: "PreferDualStack" + + ## Configure pod disruption budgets for Prometheus + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget + ## + podDisruptionBudget: + enabled: false + minAvailable: 1 + maxUnavailable: "" + + # Ingress exposes thanos 
sidecar outside the cluster + thanosIngress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + servicePort: 10901 + + ## Port to expose on each node + ## Only used if service.type is 'NodePort' + ## + nodePort: 30901 + + ## Hosts must be provided if Ingress is enabled. + ## + hosts: [] + # - thanos-gateway.domain.com + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Thanos Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: thanos-gateway-tls + # hosts: + # - thanos-gateway.domain.com + # + + ## ExtraSecret can be used to store various data in an extra secret + ## (use it for example to store hashed basic auth credentials) + extraSecret: + ## if not set, name will be auto generated + # name: "" + annotations: {} + data: {} + # auth: | + # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0 + # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c. + + ingress: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Redirect ingress to an additional defined port on the service + # servicePort: 8081 + + ## Hostnames. + ## Must be provided if Ingress is enabled. + ## + # hosts: + # - prometheus.domain.com + hosts: [] + + ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## TLS configuration for Prometheus Ingress + ## Secret must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-general-tls + # hosts: + # - prometheus.example.com + + # -- BETA: Configure the gateway routes for the chart here. + # More routes can be added by adding a dictionary key like the 'main' route. + # Be aware that this is an early beta of this feature, + # kube-prometheus-stack does not guarantee this works and is subject to change. + # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk + # [[ref]](https://gateway-api.sigs.k8s.io/references/spec/#gateway.networking.k8s.io%2fv1alpha2) + route: + main: + # -- Enables or disables the route + enabled: false + + # -- Set the route apiVersion, e.g. 
gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2 + apiVersion: gateway.networking.k8s.io/v1 + # -- Set the route kind + # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute + kind: HTTPRoute + + annotations: {} + labels: {} + + hostnames: [] + # - my-filter.example.com + parentRefs: [] + # - name: acme-gw + + matches: + - path: + type: PathPrefix + value: / + + ## Filters define the filters that are applied to requests that match this rule. + filters: [] + + ## Additional custom rules that can be added to the route + additionalRules: [] + + ## Configuration for creating an Ingress that will map to each Prometheus replica service + ## prometheus.servicePerReplica must be enabled + ## + ingressPerReplica: + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + annotations: {} + labels: {} + + ## Final form of the hostname for each per replica ingress is + ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }} + ## + ## Prefix for the per replica ingress that will have `-$replicaNumber` + ## appended to the end + hostPrefix: "" + ## Domain that will be used for the per replica ingress + hostDomain: "" + + ## Paths to use for ingress rules + ## + paths: [] + # - / + + ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched) + ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types + # pathType: ImplementationSpecific + + ## Secret name containing the TLS certificate for Prometheus per replica ingress + ## Secret must be manually created in the namespace + tlsSecretName: "" + + ## Separated secret for each per replica Ingress. Can be used together with cert-manager + ## + tlsSecretPerReplica: + enabled: false + ## Final form of the secret for each per replica ingress is + ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }} + ## + prefix: "prometheus" + + ## Configure additional options for default pod security policy for Prometheus + ## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + podSecurityPolicy: + allowedCapabilities: [] + allowedHostPaths: [] + volumes: [] + + serviceMonitor: + ## If true, create a serviceMonitor for prometheus + ## + selfMonitor: true + + ## Scrape interval. If not set, the Prometheus default scrape interval is used. + ## + interval: "" + + ## Additional labels + ## + additionalLabels: {} + + ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted. + ## + sampleLimit: 0 + + ## TargetLimit defines a limit on the number of scraped targets that will be accepted. + ## + targetLimit: 0 + + ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelLimit: 0 + + ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelNameLengthLimit: 0 + + ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer. + ## + labelValueLengthLimit: 0 + + ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS. 
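+    ## A hedged istio-mTLS sketch (the certificate paths depend on how the
+    ## sidecar certs are mounted into the Prometheus pod and are illustrative only):
+    # scheme: https
+    # tlsConfig:
+    #   caFile: /etc/prom-certs/root-cert.pem
+    #   certFile: /etc/prom-certs/cert-chain.pem
+    #   keyFile: /etc/prom-certs/key.pem
+    #   insecureSkipVerify: true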
+    scheme: ""
+
+    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
+    ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#tlsconfig
+    tlsConfig: {}
+
+    bearerTokenFile:
+
+    ## Metric relabel configs to apply to samples before ingestion.
+    ##
+    metricRelabelings: []
+    # - action: keep
+    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
+    #   sourceLabels: [__name__]
+
+    # relabel configs to apply to samples before ingestion.
+    ##
+    relabelings: []
+    # - sourceLabels: [__meta_kubernetes_pod_node_name]
+    #   separator: ;
+    #   regex: ^(.*)$
+    #   targetLabel: nodename
+    #   replacement: $1
+    #   action: replace
+
+    ## Additional Endpoints
+    ##
+    additionalEndpoints: []
+    # - port: oauth-metrics
+    #   path: /metrics
+
+  ## Settings affecting prometheusSpec
+  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheusspec
+  ##
+  prometheusSpec:
+    ## Statefulset's persistent volume claim retention policy
+    ## whenDeleted and whenScaled determine whether
+    ## statefulset's PVCs are deleted (true) or retained (false)
+    ## on scaling down and deleting statefulset, respectively.
+    ## Requires Kubernetes version 1.27.0+.
+    ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
+    persistentVolumeClaimRetentionPolicy: {}
+    #  whenDeleted: Retain
+    #  whenScaled: Retain
+
+    ## AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod.
+    ## If the field isn’t set, the operator mounts the service account token by default.
+    ## Warning: be aware that, by default, Prometheus requires the service account token for Kubernetes service discovery.
+    ## It is possible to use a strategic merge patch to project the service account token into the ‘prometheus’ container.
+    automountServiceAccountToken: true
+
+    ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
+    ##
+    disableCompaction: false
+    ## APIServerConfig
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#apiserverconfig
+    ##
+    apiserverConfig: {}
+
+    ## Allows setting additional arguments for the Prometheus container
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#monitoring.coreos.com/v1.Prometheus
+    additionalArgs: []
+
+    ## Interval between consecutive scrapes.
+    ## Defaults to 30s.
+    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
+    ##
+    scrapeInterval: ""
+
+    ## Number of seconds to wait for target to respond before erroring
+    ##
+    scrapeTimeout: ""
+
+    ## List of scrape classes to expose to scraping objects such as
+    ## PodMonitors, ServiceMonitors, Probes and ScrapeConfigs.
+    ##
+    scrapeClasses: []
+    # - name: istio-mtls
+    #   default: false
+    #   tlsConfig:
+    #     caFile: /etc/prometheus/secrets/istio.default/root-cert.pem
+    #     certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem
+
+    ## Interval between consecutive evaluations.
+    ##
+    evaluationInterval: ""
+
+    ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
+    ##
+    listenLocal: false
+
+    ## EnableAdminAPI enables the Prometheus administrative HTTP API, which includes functionality such as deleting time series.
+    ## This is disabled by default.
+ ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis + ## + enableAdminAPI: false + + ## Sets version of Prometheus overriding the Prometheus version as derived + ## from the image tag. Useful in cases where the tag does not follow semver v2. + version: "" + + ## WebTLSConfig defines the TLS parameters for HTTPS + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#webtlsconfig + web: {} + + ## Exemplars related settings that are runtime reloadable. + ## It requires to enable the exemplar storage feature to be effective. + exemplars: "" + ## Maximum number of exemplars stored in memory for all series. + ## If not set, Prometheus uses its default value. + ## A value of zero or less than zero disables the storage. + # maxSize: 100000 + + # EnableFeatures API enables access to Prometheus disabled features. + # ref: https://prometheus.io/docs/prometheus/latest/disabled_features/ + enableFeatures: [] + # - exemplar-storage + + ## Image of Prometheus. + ## + image: + registry: quay.io + repository: prometheus/prometheus + tag: v3.1.0 + sha: "" + + ## Tolerations for use with node taints + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal" + # value: "value" + # effect: "NoSchedule" + + ## If specified, the pod's topology spread constraints. + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + topologySpreadConstraints: [] + # - maxSkew: 1 + # topologyKey: topology.kubernetes.io/zone + # whenUnsatisfiable: DoNotSchedule + # labelSelector: + # matchLabels: + # app: prometheus + + ## Alertmanagers to which alerts will be sent + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#alertmanagerendpoints + ## + ## Default configuration will connect to the alertmanager deployed as part of this release + ## + alertingEndpoints: [] + # - name: "" + # namespace: "" + # port: http + # scheme: http + # pathPrefix: "" + # tlsConfig: {} + # bearerTokenFile: "" + # apiVersion: v2 + + ## External labels to add to any time series or alerts when communicating with external systems + ## + externalLabels: {} + + ## enable --web.enable-remote-write-receiver flag on prometheus-server + ## + enableRemoteWriteReceiver: false + + ## Name of the external label used to denote replica name + ## + replicaExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote replica name + ## + replicaExternalLabelNameClear: false + + ## Name of the external label used to denote Prometheus instance name + ## + prometheusExternalLabelName: "" + + ## If true, the Operator won't add the external label used to denote Prometheus instance name + ## + prometheusExternalLabelNameClear: false + + ## External URL at which Prometheus will be reachable. + ## + externalUrl: "" + + ## Define which Nodes the Pods are scheduled on. + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. + ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not + ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated + ## with the new list of secrets. 
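+    ## For example, to mount an etcd client certificate that a scrape job could
+    ## read from /etc/prometheus/secrets/etcd-client-cert/ (as the commented
+    ## additionalScrapeConfigs example further below does; the secret name is
+    ## illustrative):
+    # secrets:
+    #   - etcd-client-cert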
+ ## + secrets: [] + + ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods. + ## The ConfigMaps are mounted into /etc/prometheus/configmaps/. + ## + configMaps: [] + + ## QuerySpec defines the query command line flags when starting Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#queryspec + ## + query: {} + + ## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery. + ruleNamespaceSelector: {} + ## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel" + # ruleNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the PrometheusRule resources created + ## + ruleSelectorNilUsesHelmValues: true + + ## PrometheusRules to be selected for target discovery. + ## If {}, select all PrometheusRules + ## + ruleSelector: {} + ## Example which select all PrometheusRules resources + ## with label "prometheus" with values any of "example-rules" or "example-rules-2" + # ruleSelector: + # matchExpressions: + # - key: prometheus + # operator: In + # values: + # - example-rules + # - example-rules-2 + # + ## Example which select all PrometheusRules resources with label "role" set to "example-rules" + # ruleSelector: + # matchLabels: + # role: example-rules + + ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the servicemonitors created + ## + serviceMonitorSelectorNilUsesHelmValues: true + + ## ServiceMonitors to be selected for target discovery. + ## If {}, select all ServiceMonitors + ## + serviceMonitorSelector: {} + ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel" + # serviceMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## Namespaces to be selected for ServiceMonitor discovery. + ## + serviceMonitorNamespaceSelector: {} + ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel" + # serviceMonitorNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the podmonitors created + ## + podMonitorSelectorNilUsesHelmValues: true + + ## PodMonitors to be selected for target discovery. + ## If {}, select all PodMonitors + ## + podMonitorSelector: {} + ## Example which selects PodMonitors with label "prometheus" set to "somelabel" + # podMonitorSelector: + # matchLabels: + # prometheus: somelabel + + ## If nil, select own namespace. Namespaces to be selected for PodMonitor discovery. 
+ podMonitorNamespaceSelector: {} + ## Example which selects PodMonitor in namespaces with label "prometheus" set to "somelabel" + # podMonitorNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the probes created + ## + probeSelectorNilUsesHelmValues: true + + ## Probes to be selected for target discovery. + ## If {}, select all Probes + ## + probeSelector: {} + ## Example which selects Probes with label "prometheus" set to "somelabel" + # probeSelector: + # matchLabels: + # prometheus: somelabel + + ## If nil, select own namespace. Namespaces to be selected for Probe discovery. + probeNamespaceSelector: {} + ## Example which selects Probe in namespaces with label "prometheus" set to "somelabel" + # probeNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## If true, a nil or {} value for prometheus.prometheusSpec.scrapeConfigSelector will cause the + ## prometheus resource to be created with selectors based on values in the helm deployment, + ## which will also match the scrapeConfigs created + ## + ## If null and scrapeConfigSelector is also null, exclude field from the prometheusSpec + ## (keeping downward compatibility with older versions of CRD) + ## + scrapeConfigSelectorNilUsesHelmValues: true + + ## scrapeConfigs to be selected for target discovery. + ## If {}, select all scrapeConfigs + ## + scrapeConfigSelector: {} + ## Example which selects scrapeConfigs with label "prometheus" set to "somelabel" + # scrapeConfigSelector: + # matchLabels: + # prometheus: somelabel + + ## If nil, select own namespace. Namespaces to be selected for scrapeConfig discovery. + ## If null, exclude the field from the prometheusSpec (keeping downward compatibility with older versions of CRD) + scrapeConfigNamespaceSelector: {} + ## Example which selects scrapeConfig in namespaces with label "prometheus" set to "somelabel" + # scrapeConfigNamespaceSelector: + # matchLabels: + # prometheus: somelabel + + ## How long to retain metrics + ## + retention: 10d + + ## Maximum size of metrics + ## + retentionSize: "" + + ## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration + ## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb + tsdb: + outOfOrderTimeWindow: 0s + + ## Enable compression of the write-ahead log using Snappy. + ## + walCompression: true + + ## If true, the Operator won't process any Prometheus configuration changes + ## + paused: false + + ## Number of replicas of each shard to deploy for a Prometheus deployment. + ## Number of replicas multiplied by shards is the total number of Pods created. + ## + replicas: 1 + + ## EXPERIMENTAL: Number of shards to distribute targets onto. + ## Number of replicas multiplied by shards is the total number of Pods created. + ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved. + ## Increasing shards will not reshard data either but it will continue to be available from the same instances. + ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location. + ## Sharding is done on the content of the `__address__` target meta-label. 
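+    ## For example, `shards: 2` together with `replicas: 2` would run four
+    ## Prometheus pods, each shard scraping roughly half of the targets (a hedged
+    ## illustration of the replicas-times-shards math):
+    # shards: 2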
+ ## + shards: 1 + + ## Log level for Prometheus be configured in + ## + logLevel: info + + ## Log format for Prometheus be configured in + ## + logFormat: logfmt + + ## Prefix used to register routes, overriding externalUrl route. + ## Useful for proxies that rewrite URLs. + ## + routePrefix: / + + ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata + ## Metadata Labels and Annotations gets propagated to the prometheus pods. + ## + podMetadata: {} + # labels: + # app: prometheus + # k8s-app: prometheus + + ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node. + ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided. + ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node. + ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured. + podAntiAffinity: "soft" + + ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity. + ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone + ## + podAntiAffinityTopologyKey: kubernetes.io/hostname + + ## Assign custom affinity rules to the prometheus instance + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + affinity: {} + # nodeAffinity: + # requiredDuringSchedulingIgnoredDuringExecution: + # nodeSelectorTerms: + # - matchExpressions: + # - key: kubernetes.io/e2e-az-name + # operator: In + # values: + # - e2e-az1 + # - e2e-az2 + + ## The remote_read spec configuration for Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotereadspec + remoteRead: [] + # - url: http://remote1/read + ## additionalRemoteRead is appended to remoteRead + additionalRemoteRead: [] + + ## The remote_write spec configuration for Prometheus. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#remotewritespec + remoteWrite: [] + # - url: http://remote1/push + ## additionalRemoteWrite is appended to remoteWrite + additionalRemoteWrite: [] + + ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature + remoteWriteDashboards: false + + ## Resource limits & requests + ## + resources: {} + # requests: + # memory: 400Mi + + ## Prometheus StorageSpec for persistent data + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/user-guides/storage.md + ## + storageSpec: {} + ## Using PersistentVolumeClaim + ## + # volumeClaimTemplate: + # spec: + # storageClassName: gluster + # accessModes: ["ReadWriteOnce"] + # resources: + # requests: + # storage: 50Gi + # selector: {} + + ## Using tmpfs volume + ## + # emptyDir: + # medium: Memory + + # Additional volumes on the output StatefulSet definition. + volumes: [] + + # Additional VolumeMounts on the output StatefulSet definition. + volumeMounts: [] + + ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations + ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form + ## as specified in the official Prometheus documentation: + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. 
As scrape configs are + ## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility + ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible + ## scrape configs are going to break Prometheus after the upgrade. + ## AdditionalScrapeConfigs can be defined as a list or as a templated string. + ## + ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the + ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes + ## + additionalScrapeConfigs: [] + # - job_name: kube-etcd + # kubernetes_sd_configs: + # - role: node + # scheme: https + # tls_config: + # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca + # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client + # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key + # relabel_configs: + # - action: labelmap + # regex: __meta_kubernetes_node_label_(.+) + # - source_labels: [__address__] + # action: replace + # targetLabel: __address__ + # regex: ([^:;]+):(\d+) + # replacement: ${1}:2379 + # - source_labels: [__meta_kubernetes_node_name] + # action: keep + # regex: .*mst.* + # - source_labels: [__meta_kubernetes_node_name] + # action: replace + # targetLabel: node + # regex: (.*) + # replacement: ${1} + # metric_relabel_configs: + # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) + # action: labeldrop + # + ## If scrape config contains a repetitive section, you may want to use a template. + ## In the following example, you can see how to define `gce_sd_configs` for multiple zones + # additionalScrapeConfigs: | + # - job_name: "node-exporter" + # gce_sd_configs: + # {{range $zone := .Values.gcp_zones}} + # - project: "project1" + # zone: "{{$zone}}" + # port: 9100 + # {{end}} + # relabel_configs: + # ... + + + ## If additional scrape configurations are already deployed in a single secret file you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalScrapeConfigs + additionalScrapeConfigsSecret: {} + # enabled: false + # name: + # key: + + ## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. This can be useful + ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false' + additionalPrometheusSecretsAnnotations: {} + + ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified + ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#. + ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator. + ## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this + ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release + ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade. 
+ ## + additionalAlertManagerConfigs: [] + # - consul_sd_configs: + # - server: consul.dev.test:8500 + # scheme: http + # datacenter: dev + # tag_separator: ',' + # services: + # - metrics-prometheus-alertmanager + + ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage + ## them separately from the helm deployment, you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalAlertManagerConfigs + additionalAlertManagerConfigsSecret: {} + # name: + # key: + # optional: false + + ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended + ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the + ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs. + ## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the + ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel + ## configs are going to break Prometheus after the upgrade. + ## + additionalAlertRelabelConfigs: [] + # - separator: ; + # regex: prometheus_replica + # replacement: $1 + # action: labeldrop + + ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage + ## them separately from the helm deployment, you can use this section. + ## Expected values are the secret name and key + ## Cannot be used with additionalAlertRelabelConfigs + additionalAlertRelabelConfigsSecret: {} + # name: + # key: + + ## SecurityContext holds pod-level security attributes and common container settings. + ## This defaults to non root user with uid 1000 and gid 2000. + ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md + ## + securityContext: + runAsGroup: 2000 + runAsNonRoot: true + runAsUser: 1000 + fsGroup: 2000 + seccompProfile: + type: RuntimeDefault + + ## Priority class assigned to the Pods + ## + priorityClassName: "" + + ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment. + ## This section is experimental, it may change significantly without deprecation notice in any release. + ## This is experimental and may change significantly without backward compatibility in any release. + ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#thanosspec + ## + thanos: {} + # secretProviderClass: + # provider: gcp + # parameters: + # secrets: | + # - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest" + # fileName: "objstore.yaml" + ## ObjectStorageConfig configures object storage in Thanos. 
    # objectStorageConfig:
    #   # use an existing secret; if configured, objectStorageConfig.secret will not be used
    #   existingSecret: {}
    #   # name: ""
    #   # key: ""
    #   # will render the objectStorageConfig secret data and configure it to be used by the Thanos custom resource;
    #   # ignored when prometheusSpec.thanos.objectStorageConfig.existingSecret is set
    #   # https://thanos.io/tip/thanos/storage.md/#s3
    #   secret: {}
    #   # type: S3
    #   # config:
    #   #   bucket: ""
    #   #   endpoint: ""
    #   #   region: ""
    #   #   access_key: ""
    #   #   secret_key: ""

    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a
    ## Prometheus pod. If using a proxy extra container, update targetPort with the proxy container port.
    containers: []
    # containers:
    # - name: oauth-proxy
    #   image: quay.io/oauth2-proxy/oauth2-proxy:v7.5.1
    #   args:
    #   - --upstream=http://127.0.0.1:9090
    #   - --http-address=0.0.0.0:8081
    #   - --metrics-address=0.0.0.0:8082
    #   - ...
    #   ports:
    #   - containerPort: 8081
    #     name: oauth-proxy
    #     protocol: TCP
    #   - containerPort: 8082
    #     name: oauth-metrics
    #     protocol: TCP
    #   resources: {}

    ## InitContainers allows injecting additional initContainers. This is meant to allow making some changes
    ## (permissions, directory tree) on mounted volumes before starting Prometheus.
    initContainers: []

    ## PortName to use for Prometheus.
    ##
    portName: "http-web"

    ## ArbitraryFSAccessThroughSMs configures whether configuration based on a ServiceMonitor can access arbitrary
    ## files on the file system of the Prometheus container, e.g. bearer token files.
    arbitraryFSAccessThroughSMs: false

    ## OverrideHonorLabels, if set to true, overrides all user-configured honor_labels. If honorLabels is set to true
    ## in a ServiceMonitor or PodMonitor, this overrides honor_labels to false.
    overrideHonorLabels: false

    ## OverrideHonorTimestamps allows globally enforcing honoring of timestamps in all scrape configs.
    overrideHonorTimestamps: false

    ## When ignoreNamespaceSelectors is set to true, the namespaceSelector from all PodMonitor, ServiceMonitor, and
    ## Probe objects will be ignored; they will only discover targets within the namespace of the PodMonitor,
    ## ServiceMonitor, or Probe object, and ServiceMonitors will be installed in the default service namespace.
    ## Defaults to false.
    ignoreNamespaceSelectors: false

    ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each user-created alert and metric.
    ## The label value will always be the namespace of the object that is being created.
    ## Disabled by default.
    enforcedNamespaceLabel: ""

    ## PrometheusRulesExcludedFromEnforce - list of Prometheus rules to be excluded from enforcement of adding
    ## namespace labels. Works only if enforcedNamespaceLabel is set. Make sure both ruleNamespace and ruleName are
    ## set for each pair.
    ## Deprecated: use `excludedFromEnforcement` instead.
    prometheusRulesExcludedFromEnforce: []

    ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe, and PrometheusRule
    ## objects to be excluded from enforcing a namespace label of origin.
    ## Works only if enforcedNamespaceLabel is set.
    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#objectreference
    excludedFromEnforcement: []
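    ## For example, a minimal sketch excluding a single ServiceMonitor from namespace-label enforcement (illustrative
    ## namespace and name; the fields follow the ObjectReference API linked above):
    # excludedFromEnforcement:
    # - group: monitoring.coreos.com
    #   resource: servicemonitors
    #   namespace: kube-system
    #   name: my-servicemonitor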
    ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
    ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
    ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in
    ## versions of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/).
    queryLogFile: false

    ## Used to set a global sample_limit for Prometheus. This acts as the default SampleLimit for ServiceMonitors
    ## and/or PodMonitors. Set to 'false' to disable the global sample_limit, or set to a number to override the
    ## default value.
    sampleLimit: false

    ## EnforcedKeepDroppedTargetsLimit defines a limit on the number of targets dropped by relabeling that will be
    ## kept in memory. The value overrides any spec.keepDroppedTargets set by ServiceMonitor, PodMonitor, or Probe
    ## objects unless spec.keepDroppedTargets is greater than zero and less than spec.enforcedKeepDroppedTargets.
    ## 0 means no limit.
    enforcedKeepDroppedTargets: 0

    ## EnforcedSampleLimit defines a global limit on the number of scraped samples that will be accepted. This
    ## overrides any SampleLimit set per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce
    ## the SampleLimit to keep the overall number of samples/series under the desired limit. Note that if SampleLimit
    ## is lower, that value will be taken instead.
    enforcedSampleLimit: false

    ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
    ## per ServiceMonitor and/or PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the
    ## overall number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken
    ## instead, except if either value is zero, in which case the non-zero value will be used. If both values are
    ## zero, no limit is enforced.
    enforcedTargetLimit: false

    ## Per-scrape limit on the number of labels that will be accepted for a sample. If more than this number of labels
    ## are present post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid
    ## in Prometheus versions 2.27.0 and newer.
    enforcedLabelLimit: false

    ## Per-scrape limit on the length of label names that will be accepted for a sample. If a label name is longer
    ## than this number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only
    ## valid in Prometheus versions 2.27.0 and newer.
    enforcedLabelNameLengthLimit: false

    ## Per-scrape limit on the length of label values that will be accepted for a sample. If a label value is longer
    ## than this number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only
    ## valid in Prometheus versions 2.27.0 and newer.
    enforcedLabelValueLengthLimit: false

    ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still
    ## experimental in Prometheus, so it may change in any upcoming release.
    allowOverlappingBlocks: false
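    ## For example (illustrative values only), an admin-enforced profile that caps series and label sizes per scrape
    ## using the enforced* settings documented above:
    # enforcedSampleLimit: 100000
    # enforcedLabelLimit: 60
    # enforcedLabelNameLengthLimit: 511
    # enforcedLabelValueLengthLimit: 1023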
    ## Minimum number of seconds for which a newly created pod should be ready without any of its containers crashing
    ## for it to be considered available. Defaults to 0 (the pod will be considered available as soon as it is ready).
    minReadySeconds: 0

    ## Required for use in managed Kubernetes clusters (such as AWS EKS) with a custom CNI (such as Calico), because
    ## the control plane managed by AWS cannot otherwise communicate with the pods' IP CIDR and admission webhooks do
    ## not work. Use the host's network namespace if true. Make sure you understand the security implications if you
    ## want to enable it. When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet
    ## automatically.
    hostNetwork: false

    ## HostAliases holds the mapping between IPs and hostnames that will be injected
    ## as entries in the pod's hosts file.
    hostAliases: []
    # - ip: 10.10.0.100
    #   hostnames:
    #   - a1.app.local
    #   - b1.app.local

    ## TracingConfig configures tracing in Prometheus.
    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#prometheustracingconfig
    tracingConfig: {}

    ## Defines the service discovery role used to discover targets from ServiceMonitor objects and Alertmanager
    ## endpoints. If set, the value should be either "Endpoints" or "EndpointSlice". If unset, the operator assumes
    ## the "Endpoints" role.
    serviceDiscoveryRole: ""

    ## Additional configuration which is not covered by the properties above (passed through tpl).
    additionalConfig: {}

    ## Additional configuration which is not covered by the properties above.
    ## Useful if you need advanced templating inside prometheusSpec.
    ## Otherwise, use prometheus.prometheusSpec.additionalConfig (passed through tpl).
    additionalConfigString: ""

    ## Defines the maximum time that the `prometheus` container's startup probe will wait before being considered
    ## failed. The startup probe will return success after the WAL replay is complete. If set, the value should be
    ## greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15 minutes).
    maximumStartupDurationSeconds: 0

  additionalRulesForClusterRole: []
  # - apiGroups: [ "" ]
  #   resources:
  #   - nodes/proxy
  #   verbs: [ "get", "list", "watch" ]

  additionalServiceMonitors: []
  ## Name of the ServiceMonitor to create.
  ##
  # - name: ""

  ## Additional labels to set, used for the ServiceMonitorSelector, together with standard labels from the chart.
  ##
  # additionalLabels: {}

  ## Service label for use in assembling a job name of the form