-
Notifications
You must be signed in to change notification settings - Fork 0
/
values.yaml
164 lines (146 loc) · 4.28 KB
/
values.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
# Default values for kubecache.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

replicaCount: 1

image:
  repository: udhos/kubecache
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: ""

imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
  # Specifies whether a service account should be created
  create: true
  # Annotations to add to the service account
  annotations: {}
  # The name of the service account to use.
  # If not set and create is true, a name is generated using the fullname template
  name: ""
# redeploy:
#
# 'always': adds a random annotation to Deployment in
# order to redeploy the pods whenever the chart is reapplied.
#
# 'reconfig': adds annotations with checksums of configurations in
# order to redeploy the pods whenever configuration is changed.
#
# https://v3.helm.sh/docs/howto/charts_tips_and_tricks/#automatically-roll-deployments
#
redeploy: always

# Annotation values must be strings; all are quoted to avoid YAML retyping.
podAnnotations:
  "sidecar.istio.io/inject": "true"
  "sidecar.istio.io/interceptionMode": "TPROXY" # REDIRECT or TPROXY
  "prometheus.io/scrape": "true"
  "prometheus.io/path": "/metrics"
  "prometheus.io/port": "3000"
podSecurityContext: {}
  # fsGroup: 2000

securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000
ingress:
  enabled: false
  className: ""
  annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"
  hosts:
    - host: chart-example.local
      paths:
        - path: /
          pathType: ImplementationSpecific
  tls: []
  #  - secretName: chart-example-tls
  #    hosts:
  #      - chart-example.local
# add custom labels to deployment pods
customLabels:
  app: kubecache

resources:
  requests:
    cpu: 300m
    memory: 200Mi
    ephemeral-storage: 200Mi
  limits:
    cpu: 2000m
    memory: 400Mi
    ephemeral-storage: 200Mi
autoscaling:
  enabled: true
  minReplicas: 1
  maxReplicas: 100
  targetCPUUtilizationPercentage: 80
  # HPA API version used by the template (autoscaling/v2 requires Kubernetes >= 1.23).
  apiVersion: autoscaling/v2

nodeSelector: {}
tolerations: []
affinity: {}
service:
  type: ClusterIP
  port: 9000
  targetPort: 8080

podHealthCheck:
  port: 8888
  path: /health
#
# See: https://stackoverflow.com/questions/72816925/helm-templating-in-configmap-for-values-yaml
#
configMapProperties:
  AUTOMEMLIMIT_DEBUG: "true"
  #SECRET_ROLE_ARN: ""
  #DEBUG_LOG: "true"
  #LISTEN_ADDR: ":8080"
  #BACKEND_URL: "http://config-server:9000"
  #
  # only requests matching both RESTRICT_ROUTE_REGEXP and RESTRICT_METHOD are cached.
  # *empty* list means match *anything*.
  #
  #RESTRICT_ROUTE_REGEXP: '["^/develop", "^/homolog", "^/prod", "/develop/?$", "/homolog/?$", "/prod/?$"]'
  #RESTRICT_METHOD: '["GET", "HEAD"]'
  #
  #BACKEND_TIMEOUT: 300s
  #CACHE_TTL: 300s
  #CACHE_ERROR_TTL: 60s
  #HEALTH_ADDR: ":8888"
  #HEALTH_PATH: /health
  #METRICS_ADDR: ":3000"
  #METRICS_PATH: /metrics
  #METRICS_NAMESPACE: ""
  #METRICS_BUCKETS_LATENCY_HTTP: "0.00001, 0.000025, 0.00005, 0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5, 10, 25, 50, 100, 250, 500, 1000"
  #GROUPCACHE_VERSION: "3"
  #GROUPCACHE_PORT: :5000
  #GROUPCACHE_SIZE_BYTES: "100000000"
  #KUBEGROUP_METRICS_NAMESPACE: ""
  #KUBEGROUP_DEBUG: "true"
  #KUBEGROUP_LABEL_SELECTOR: "app=kubecache"
  OTEL_TRACES_SAMPLER: parentbased_traceidratio
  OTEL_TRACES_SAMPLER_ARG: "0.01"
  # pick one of OTEL_SERVICE_NAME or OTEL_RESOURCE_ATTRIBUTES
  #OTEL_SERVICE_NAME: mynamespace.kubecache
  #OTEL_RESOURCE_ATTRIBUTES: 'service.name=mynamespace.kubecache,key2=value2'
  #
  # General configuration: https://opentelemetry.io/docs/concepts/sdk-configuration/general-sdk-configuration/
  # Exporter configuration: https://opentelemetry.io/docs/concepts/sdk-configuration/otlp-exporter-configuration/
  #
  # Jaeger:
  OTELCONFIG_EXPORTER: jaeger
  OTEL_TRACES_EXPORTER: jaeger
  OTEL_PROPAGATORS: b3multi
  OTEL_EXPORTER_OTLP_ENDPOINT: "http://jaeger-collector:14268"
  #
  # OTLP gRPC:
  #OTELCONFIG_EXPORTER: grpc
  #OTEL_TRACES_EXPORTER: otlp
  #OTEL_PROPAGATORS: b3multi
  #OTEL_EXPORTER_OTLP_ENDPOINT: "http://jaeger-collector:4317"
  #
  # OTLP HTTP:
  #OTELCONFIG_EXPORTER: http
  #OTEL_TRACES_EXPORTER: otlp
  #OTEL_PROPAGATORS: b3multi
  #OTEL_EXPORTER_OTLP_ENDPOINT: "http://jaeger-collector:4318"