diff --git a/k8s/lifemonitor-web/Chart.yaml b/k8s/lifemonitor-web/Chart.yaml index 1376eda..f534530 100644 --- a/k8s/lifemonitor-web/Chart.yaml +++ b/k8s/lifemonitor-web/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v1 -appVersion: "0.1.0" +appVersion: '0.5.4' description: A Helm chart for Kubernetes name: lifemonitor-web -version: 0.1.0 +version: 0.3.0 diff --git a/k8s/lifemonitor-web/templates/_helpers.tpl b/k8s/lifemonitor-web/templates/_helpers.tpl index 25b365a..587b23d 100644 --- a/k8s/lifemonitor-web/templates/_helpers.tpl +++ b/k8s/lifemonitor-web/templates/_helpers.tpl @@ -25,3 +25,12 @@ Create chart name and version as used by the chart label. {{- define "lifemonitor-web.chart" -}} {{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} {{- end -}} + + +{{/* +Define the service name +*/}} +{{- define "lifemonitor-web.serviceName" -}} +{{- printf "%s-frontend" (include "lifemonitor-web.fullname" .) -}} +{{- end -}} + diff --git a/k8s/lifemonitor-web/templates/deployment.yaml b/k8s/lifemonitor-web/templates/deployment.yaml index a7446b1..2160716 100644 --- a/k8s/lifemonitor-web/templates/deployment.yaml +++ b/k8s/lifemonitor-web/templates/deployment.yaml @@ -4,33 +4,40 @@ metadata: name: {{ include "lifemonitor-web.fullname" . }}-frontend labels: app.kubernetes.io/name: {{ include "lifemonitor-web.name" . }} - helm.sh/chart: {{ include "lifemonitor-web.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "lifemonitor-web.chart" . }} spec: replicas: {{ .Values.replicaCount }} selector: matchLabels: app.kubernetes.io/name: {{ include "lifemonitor-web.name" . }} app.kubernetes.io/instance: {{ .Release.Name }} template: metadata: labels: app.kubernetes.io/name: {{ include "lifemonitor-web.name" . 
}} app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "lifemonitor-web.chart" . }} spec: containers: - - name: {{ .Chart.Name }} + - name: frontend image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}" imagePullPolicy: {{ .Values.image.pullPolicy }} ports: - name: http containerPort: 4200 protocol: TCP + {{- if .Values.monitoring.enabled }} + - name: metrics + containerPort: 9090 + protocol: TCP + {{- end }} volumeMounts: - name: frontend-config mountPath: /app/assets/config.json - subPath: config.json + subPath: config.json - name: nginx-config mountPath: /usr/local/openresty/nginx/conf/nginx.conf subPath: nginx.conf @@ -39,6 +48,9 @@ spec: subPath: app.conf - name: logs-storage mountPath: /var/log/nginx + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} # livenessProbe: # httpGet: @@ -58,9 +70,12 @@ spec: - name: nginx-config configMap: name: {{ include "lifemonitor-web.fullname" . }}-nginx-config - defaultMode: 0644 + defaultMode: 0644 - name: logs-storage emptyDir: {} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} {{- with .Values.nodeSelector }} nodeSelector: {{- toYaml . | nindent 8 }} diff --git a/k8s/lifemonitor-web/templates/ingress.yaml b/k8s/lifemonitor-web/templates/ingress.yaml index f14dc6e..2379f28 100644 --- a/k8s/lifemonitor-web/templates/ingress.yaml +++ b/k8s/lifemonitor-web/templates/ingress.yaml @@ -1,20 +1,32 @@ {{- if .Values.ingress.enabled -}} -{{- $fullName := include "lifemonitor-web.fullname" . -}} +{{- $fullname := include "lifemonitor-web.fullname" . 
-}} +{{- $newStyle := semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +{{- if $newStyle }} +apiVersion: networking.k8s.io/v1 +{{- else }} apiVersion: extensions/v1beta1 +{{- end }} kind: Ingress metadata: - name: {{ $fullName }} + name: {{ $fullname }} labels: app.kubernetes.io/name: {{ include "lifemonitor-web.name" . }} helm.sh/chart: {{ include "lifemonitor-web.chart" . }} app.kubernetes.io/instance: {{ .Release.Name }} app.kubernetes.io/managed-by: {{ .Release.Service }} - {{- with .Values.ingress.annotations }} annotations: - {{- toYaml . | nindent 4 }} - {{- end }} + {{- with .Values.ingress.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} + # old way of setting the ingress class + {{- if not $newStyle }} + kubernetes.io/ingress.class: {{ .Values.ingress.className }} + {{- end }} spec: -{{- if .Values.ingress.tls }} + {{- if $newStyle }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} tls: {{- range .Values.ingress.tls }} - hosts: @@ -23,17 +35,27 @@ spec: {{- end }} secretName: {{ .secretName }} {{- end }} -{{- end }} + {{- end }} rules: {{- range .Values.ingress.hosts }} - - host: {{ .host | quote }} - http: - paths: - {{- range .paths }} - - path: {{ . }} - backend: - serviceName: {{ $fullName }}-frontend - servicePort: http + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ . 
| quote }} + {{- if $newStyle }} + pathType: Prefix {{- end }} + backend: + {{- if $newStyle }} + service: + name: {{$fullname}}-frontend + port: + name: http + {{- else }} + serviceName: {{$fullname}}-frontend + servicePort: http + {{- end }} + {{- end }} {{- end }} {{- end }} diff --git a/k8s/lifemonitor-web/templates/monitoring.yaml b/k8s/lifemonitor-web/templates/monitoring.yaml new file mode 100644 index 0000000..70f2814 --- /dev/null +++ b/k8s/lifemonitor-web/templates/monitoring.yaml @@ -0,0 +1,50 @@ +{{- if .Values.monitoring.enabled -}} +apiVersion: v1 +kind: Service +metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: '9090' + labels: + app.kubernetes.io/name: {{ include "lifemonitor-web.name" . }} + name: {{ .Release.Name }}-frontend-proxy-metrics-headless + namespace: {{ .Release.Namespace }} +spec: + clusterIP: None + selector: + app.kubernetes.io/name: {{ include "lifemonitor-web.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + helm.sh/chart: {{ include "lifemonitor-web.chart" . }} + ports: + - name: metrics + protocol: TCP + port: 9090 + targetPort: 9090 + +--- + +{{- if .Values.monitoring.servicemonitor.enabled -}} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "lifemonitor-web.name" . }}-metrics-servicemonitor + # same namespace that Prometheus is running in + namespace: {{ .Values.monitoring.prometheus.namespace }} + labels: + app: {{ include "lifemonitor-web.name" . }} + release: prometheus-stack +spec: + selector: + matchLabels: + app.kubernetes.io/name: {{ include "lifemonitor-web.name" . 
}} + endpoints: + - path: /metrics + port: metrics + interval: 15s + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} # namespace where the app is running +{{- end -}} # end if .Values.monitoring.servicemonitor.enabled +{{- end -}} # end if .Values.monitoring.enabled + diff --git a/k8s/lifemonitor-web/templates/nginx.configmap.yml b/k8s/lifemonitor-web/templates/nginx.configmap.yml index bcd5258..a128130 100644 --- a/k8s/lifemonitor-web/templates/nginx.configmap.yml +++ b/k8s/lifemonitor-web/templates/nginx.configmap.yml @@ -13,17 +13,28 @@ data: upstream api { # fail_timeout=0 means we always retry an upstream even if it failed # to return a good HTTP response - hash $binary_remote_addr consistent; + hash $binary_remote_addr consistent; server api-backend:8000 fail_timeout=0; + keepalive 32; } + upstream api-static { + # fail_timeout=0 means we always retry an upstream even if it failed + # to return a good HTTP response + hash $binary_remote_addr consistent; + server api-nginx:80 fail_timeout=0; + + keepalive 32; + } + # Set upstream server for the LifeMonitor Back-End upstream wss { # fail_timeout=0 means we always retry an upstream even if it failed # to return a good HTTP response hash $binary_remote_addr consistent; server api-wss:8001 fail_timeout=0; + keepalive 32; } @@ -32,22 +43,46 @@ data: '' close; } - # Log format globally enabled - # log_format extended '$ip2location $remote_addr - $remote_user [$time_local] ' - # '"$request" $status $body_bytes_sent ' - # '"$http_referer" "$http_user_agent" "$http_x_forwarded_for"'; - + {{- if .Values.monitoring.enabled -}} server { - server_name {{ .Values.externalServerName }}; + server_name {{ .Values.externalServerName }} {{ include "lifemonitor-web.name" . }}-metrics.default {{ include "lifemonitor-web.name" . 
}}-metrics.default.svc.cluster.local; # save logs here - access_log /var/log/nginx/app.access.log extended; - error_log /var/log/nginx/app.error.log error; + access_log /var/log/nginx/metrics.access.log extended; + error_log /var/log/nginx/metrics.error.log; proxy_read_timeout 600; proxy_connect_timeout 600; - proxy_send_timeout 600; - + proxy_send_timeout 600; + + listen 9090; + client_max_body_size 4G; + + # set the correct host(s) for your site + #server_name localhost; + keepalive_timeout 60; + etag on; + + # ssl_certificate /nginx/certs/lm.crt; + # ssl_certificate_key /nginx/certs/lm.key; + + # force HTTP traffic to HTTPS + # error_page 497 http://$host:4200$request_uri; + + # expose extended metrics + location = /metrics { + stub_status on; + } + } + {{- end -}} + + server { + server_name {{ .Values.externalServerName }} {{ include "lifemonitor-web.name" . }}-metrics.default {{ include "lifemonitor-web.name" . }}-metrics.default.svc.cluster.local; + + proxy_read_timeout 600; + proxy_connect_timeout 600; + proxy_send_timeout 600; + listen 4200; client_max_body_size 4G; @@ -55,42 +90,41 @@ data: #server_name localhost; keepalive_timeout 60; etag on; - + # ssl_certificate /nginx/certs/lm.crt; # ssl_certificate_key /nginx/certs/lm.key; # force HTTP traffic to HTTPS error_page 497 http://$host:4200$request_uri; - # expose standard nginx metrics - location /nginx-metrics { - stub_status on; + # disable standard nginx metrics + location = /nginx-metrics { + deny all; } - # expose extended metrics - location /metrics { - default_type text/html; - content_by_lua_block { - metric_connections:set(ngx.var.connections_reading, {"reading"}) - metric_connections:set(ngx.var.connections_waiting, {"waiting"}) - metric_connections:set(ngx.var.connections_writing, {"writing"}) - prometheus:collect() - } + # disable /metrics endpoint + location = /metrics { + deny all; } - + # wrap api location ^~ /api/ { + + rewrite /api/(.*) /$1 break; + proxy_redirect off; - + # set uppstream 
proxy_pass https://api/; # rewrite headers # proxy_pass_header Server; + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Scheme $scheme; - + proxy_set_header Cookie $http_cookie; + # proxy_http_version 1.1; # proxy_set_header Upgrade $http_upgrade; # proxy_set_header Connection 'upgrade'; @@ -100,17 +134,34 @@ data: proxy_connect_timeout 600; proxy_read_timeout 600; proxy_send_timeout 600; - - # monitor requests - log_by_lua_block { - update_metrics("webapp.api") - } } - location ~ ^/(account|static|oauth2|jobs) { + location ~ ^/(static|openapi.*) { + # disable redirects + proxy_redirect off; + + # set uppstream + proxy_pass http://api-static; + + # rewrite headers + # proxy_pass_header Server; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Scheme $scheme; + proxy_set_header Host $host; + proxy_set_header Cookie $http_cookie; + + # various proxy settings + proxy_connect_timeout 600; + proxy_read_timeout 600; + proxy_send_timeout 600; + #proxy_intercept_errors on; + } + + location ~ ^/(account|oauth2|jobs|github|integrations) { # disable redirects proxy_redirect off; - + # set uppstream proxy_pass https://api; @@ -120,17 +171,13 @@ data: proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Scheme $scheme; proxy_set_header Host $host; + proxy_set_header Cookie $http_cookie; # various proxy settings proxy_connect_timeout 600; proxy_read_timeout 600; proxy_send_timeout 600; #proxy_intercept_errors on; - - # monitor requests - log_by_lua_block { - update_metrics("webapp.api") - } } location /socket.io/ { @@ -143,31 +190,24 @@ data: proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection $connection_upgrade; proxy_set_header Host $host; - - # Callback di evento per le connessioni WebSocket - access_by_lua_block { - http_websocket_connections_total:inc(1, {'webapp'}) - } - 
log_by_lua_block { - http_websocket_connections_total:inc(-1, {'webapp'}) - } + proxy_set_header Cookie $http_cookie; } location / { # resolver 127.0.0.11 ipv6=off valid=30s; proxy_redirect off; - + proxy_set_header X-Real-IP $remote_addr; proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; proxy_set_header X-Scheme $scheme; proxy_set_header Host {{ .Values.externalServerName }}; #proxy_set_header X-NginX-Proxy true; - + proxy_connect_timeout 600; proxy_read_timeout 600; proxy_send_timeout 600; - proxy_intercept_errors on; + #proxy_intercept_errors on; access_log off; @@ -183,17 +223,15 @@ data: pcre_jit on; # logs - pid /var/log/nginx/nginx.pid; - error_log /var/log/nginx/nginx.error.log; - - + pid /var/run/openresty/nginx.pid; + error_log /var/log/nginx/nginx.error.log crit; events { worker_connections 1024; } http { - + include mime.types; default_type application/octet-stream; @@ -202,178 +240,22 @@ data: # When the use of underscores is disabled, request header fields whose names contain underscores are marked as invalid and become subject to the ignore_invalid_headers directive. 
# underscores_in_headers off; + server_names_hash_bucket_size 128; + server_names_hash_max_size 512; + proxy_headers_hash_max_size 512; proxy_headers_hash_bucket_size 128; # Define a verbose log format - log_format extended '$remote_addr ' - '[$time_local, $http_x_request_timezone, "$http_x_request_country_long", "$http_x_request_country_short", "$http_x_request_region", "$http_x_request_city", $http_x_request_zipcode, "$http_x_request_isp", $http_x_request_latitude, $http_x_request_longitude] ' - '"$request" $status $body_bytes_sent ' - '"$http_referer" "$http_user_agent" "$http_x_request_domain"'; - - # Configure Log files - access_log /var/log/nginx/nginx.access.log extended; - error_log /var/log/nginx/nginx.error.log; - - # Initialize Promethues Metrics - lua_shared_dict prometheus_metrics 10M; + log_format extended + '[$http_x_forwarded_for] ' + '"$request" $status ($body_bytes_sent bytes) - "$http_referer", "$http_user_agent", "$http_x_request_domain"'; - - init_worker_by_lua_block { - - -- init Promethues - prometheus = require("prometheus").init("prometheus_metrics") - - -- init ip2location - ip2location = require('ip2location') - ip2loc = ip2location:new('/usr/share/ip2location/DB11.IPV6.BIN') - - -- init metrics - metric_requests = prometheus:counter( - "nginx_http_requests_total", "Number of HTTP requests", {"host", "status"}) - metric_latency = prometheus:histogram( - "nginx_http_request_duration_seconds", "HTTP request latency", {"host"}) - metric_connections = prometheus:gauge( - "nginx_http_connections", "Number of HTTP connections", {"state"}) - - -- counters - http_requests_total = prometheus:counter( - "http_requests_total", "Total number of HTTP requests", - { - "server", "client_ip", "client_isp", "client_domain", - "client_country_short", "client_country_long", - "client_latitude", "client_longitude", - "scheme", "method", "request", "request_type", "status" - }) - - http_requests_client_error_total = prometheus:counter( - 
"http_requests_client_error_total", "Total number of HTTP client errors", {"host", "method", "status"}) - - http_request_duration_seconds = prometheus:histogram( - "http_request_duration_seconds", "HTTP request duration", {"host", "method", "status"}) - - http_websocket_connections_total = prometheus:gauge( - "http_websocket_connections_total", "Total number of active websocket connections", {"name"}) - - - http_response_size_bytes = prometheus:histogram( - "http_response_size_bytes", "HTTP response size", {"status"}) - http_requests_failed_total = prometheus:counter( - "http_requests_failed_total", "Total number of failed HTTP requests", {"method", "status"}) - http_requests_redirect_total = prometheus:counter( - "http_requests_redirect_total", "Total number of redirected HTTP requests", {"method", "status"}) - - http_requests_server_error_total = prometheus:counter( - "http_requests_server_error_total", "Total number of HTTP server errors", {"method", "status"}) - http_request_size_bytes = prometheus:histogram( - "http_request_size_bytes", "HTTP request size", {"method"}) - http_connections_total = prometheus:gauge( - "http_connections_total", "Total number of HTTP connections", {"type"}) - http_connections_active = prometheus:gauge( - "http_connections_active", "Number of active HTTP connections", {"type"}) - http_upstream_response_time_seconds = prometheus:histogram( - "http_upstream_response_time_seconds", "HTTP upstream response time", {"upstream"}) - http_upstream_connections_total = prometheus:gauge( - "http_upstream_connections_total", "Total number of upstream connections", {"upstream"}) - http_upstream_bytes_received_total = prometheus:counter( - "http_upstream_bytes_received_total", "Total number of bytes received from upstream", {"upstream"}) - http_upstream_bytes_sent_total = prometheus:counter( - "http_upstream_bytes_sent_total", "Total number of bytes sent to upstream", {"upstream"}) - http_cache_hit_total = prometheus:counter( - 
"http_cache_hit_total", "Total number of cache hits", {"status"}) - http_cache_miss_total = prometheus:counter( - "http_cache_miss_total", "Total number of cache misses", {"status"}) - http_cache_stale_total = prometheus:counter( - "http_cache_stale_total", "Total number of stale cache responses", {"status"}) - } - - - - init_by_lua_block { - - function update_metrics(request_type) - local server_name = ngx.var.server_name - local client_ip = ngx.var.remote_addr - -- request - local request_scheme = ngx.var.scheme - local request_uri = ngx.var.uri - local request_method = ngx.var.request_method - local request_size = tonumber(ngx.var.request_length) - local request_time = tonumber(ngx.var.request_time) - -- detect request location - local location = ip2loc:get_all('8.8.8.8') - -- response - local response_size = tonumber(ngx.var.bytes_sent) - local response_status = tonumber(ngx.var.status) - -- upstream - local upstream = ngx.var.upstream_name - local upstream_response_time = tonumber(ngx.var.upstream_response_time) - -- cache - local cache_status = ngx.var.upstream_cache_status - - -- update metrics - metric_requests:inc(1, {server_name, response_status}) - metric_latency:observe(tonumber(request_time), {server_name}) - if response_status >= 400 and response_status < 500 then - http_requests_client_error_total:inc(1, {server_name, request_method, response_status}) - elseif response_status >= 500 then - http_requests_server_error_total:inc(1, {server_name, request_method, response_status}) - else - http_requests_total:inc(1, { - server_name, client_ip, location.isp, location.domain, - location.country_short, location.country_long, - location.latitude, location.longitude, - request_scheme, request_method, request_uri, request_type, response_status - }) - if string.find(request_uri, 'socket.io') then - http_websocket_connections_total:inc(1, {'Socket X'}) - end - -- http_request_duration_seconds:observe(request_time, {request_method, response_status}) - -- 
http_response_size_bytes:observe(response_size, {response_status}) - -- http_request_size_bytes:observe(request_size, {request_method}) - -- if response_status == 200 and cache_status == "MISS" then - -- http_cache_miss_total:inc(1, {response_status}) - -- elseif response_status == 200 and cache_status == "HIT" then - -- http_cache_hit_total:inc(1, {response_status}) - -- elseif response_status == 200 and cache_status == "STALE" then - -- http_cache_stale_total:inc(1, {response_status}) - -- end - end - - -- client IP - ngx.req.set_header('X-Request-IP', location.remote_addr) - ngx.req.set_header('X-Request-ISP', location.isp) - ngx.req.set_header('X-Request-Domain', location.domain) - -- client region - ngx.req.set_header('X-Request-Country-Short', location.country_short) - ngx.req.set_header('X-Request-Country-Long', location.country_long) - ngx.req.set_header('X-Request-Region', location.region) - ngx.req.set_header('X-Request-City', location.city) - ngx.req.set_header('X-Request-ZipCode', location.zipcode) - ngx.req.set_header('X-Request-Timezone', location.timezone) - -- client geo coordinates - ngx.req.set_header('X-Request-Latitude', location.latitude) - ngx.req.set_header('X-Request-Longitude', location.longitude) - end - } - - - access_by_lua_block { - -- register_websocket('webappxxxxx', 'socket.io') - local server_name = ngx.var.server_name - local method = ngx.var.request_method - local status = tonumber(ngx.var.status) - local request_time = tonumber(ngx.var.request_time) - local response_size = tonumber(ngx.var.bytes_sent) - local upstream_response_time = tonumber(ngx.var.upstream_response_time) - local request_size = tonumber(ngx.var.request_length) - local upstream = ngx.var.upstream_name - local cache_status = ngx.var.upstream_cache_status - -- local location = ip2loc:get_all('8.8.8.8') - } + # Configure Log files + access_log /usr/local/openresty/nginx/logs/access.log extended; + # error_log /usr/local/openresty/nginx/logs/error.log warn; - # See 
Move default writable paths to a dedicated directory (#119) - # https://github.com/openresty/docker-openresty/issues/119 + # Extra settings client_body_temp_path /var/run/openresty/nginx-client-body; proxy_temp_path /var/run/openresty/nginx-proxy; fastcgi_temp_path /var/run/openresty/nginx-fastcgi; diff --git a/k8s/lifemonitor-web/values.yaml b/k8s/lifemonitor-web/values.yaml index d32813f..098541a 100644 --- a/k8s/lifemonitor-web/values.yaml +++ b/k8s/lifemonitor-web/values.yaml @@ -9,50 +9,61 @@ image: tag: latest pullPolicy: IfNotPresent -nameOverride: "" -fullnameOverride: "" +nameOverride: '' +fullnameOverride: '' externalServerName: 'localhost' # Setting for the LifeMonitor Backend API backend: - apiUrl: "" - clientId: "" + apiUrl: '' + clientId: '' service: type: NodePort port: 80 +monitoring: + enabled: false + servicemonitor: + enabled: false + prometheus: + namespace: kube-prometheus-stack + ingress: enabled: false - annotations: { - kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # hosts: - # - host: app.lifemonitor.eu - # paths: - # - "/" - } - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local + className: nginx + annotations: {} + # kubernetes.io/tls-acme: "true" + # configures the backend service + hosts: + - host: localhost + paths: + - '/' + # configure TLS for the ingress + tls: + - secretName: lifemonitor-web-tls + hosts: + - localhost resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
- # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# We usually recommend not to specify default resources and to leave this as a conscious +# choice for the user. This also increases chances charts run on environments with little +# resources, such as Minikube. If you do want to specify resources, uncomment the following +# lines, adjust them as necessary, and remove the curly braces after 'resources:'. +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} + +extraVolumes: [] + +extraVolumeMounts: [] diff --git a/k8s/values.yaml b/k8s/values.yaml index 7b89831..89fed17 100644 --- a/k8s/values.yaml +++ b/k8s/values.yaml @@ -9,12 +9,14 @@ image: tag: latest pullPolicy: Always -nameOverride: "" -fullnameOverride: "" +nameOverride: '' +fullnameOverride: '' + +externalServerName: 'localhost' # Setting for the LifeMonitor Backend API backend: - apiUrl: "https://api.lifemonitor.eu" + apiUrl: 'https://api.lifemonitor.eu' clientId: service: @@ -23,34 +25,45 @@ service: ingress: enabled: false - annotations: { - kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - # hosts: - # - host: app.lifemonitor.eu - # paths: - # - "/" - } - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local + className: nginx + annotations: {} + # kubernetes.io/tls-acme: "true" + # configures the backend service + hosts: + - host: localhost + paths: + - '/' + # configure TLS for the ingress + tls: + - secretName: lifemonitor-web-tls + hosts: + - localhost + +monitoring: + enabled: false + servicemonitor: + enabled: false + prometheus: + namespace: kube-prometheus-stack resources: {} - # We usually recommend not to specify default resources and to leave this as a conscious - # choice for the user. This also increases chances charts run on environments with little - # resources, such as Minikube. 
If you do want to specify resources, uncomment the following - # lines, adjust them as necessary, and remove the curly braces after 'resources:'. - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi +# We usually recommend not to specify default resources and to leave this as a conscious +# choice for the user. This also increases chances charts run on environments with little +# resources, such as Minikube. If you do want to specify resources, uncomment the following +# lines, adjust them as necessary, and remove the curly braces after 'resources:'. +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi nodeSelector: {} tolerations: [] affinity: {} + +extraVolumes: [] + +extraVolumeMounts: [] diff --git a/ngsw-config.json b/ngsw-config.json index 48e57da..cfcf80a 100644 --- a/ngsw-config.json +++ b/ngsw-config.json @@ -37,7 +37,10 @@ "/oauth2/**", "/jobs/**", "/socket.io/**", - "/openapi.*" + "/github/**", + "/integrations/**", + "/openapi.*", + "/metrics" ], "cacheConfig": { "strategy": "freshness",