################################################################################
# This configuration file is a template that is intended to show all of the
# valid options. There are two ways to use this file:
#
# 1. You can allow the scripts to create their own configuration; in this case
#    defaults are used for any configuration value that has a default, and you
#    will be prompted for any required values.
# 2. You can manually edit this file for your environment; to do this, create a
#    copy of this file in this configuration directory named Pulumi.stackname.yaml,
#    where stackname is the name of the stack you are configuring.
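#
# For example, to follow option 2 with a hypothetical stack named "mystack",
# copy this template and edit the copy:
#
#   cp Pulumi.stackname.yaml.example Pulumi.mystack.yaml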
#
# NOTE: Currently, many of the stacks stood up by this process have sanity checks
# that will fill in default values if values are not found in this file.
#
# IMPORTANT NOTE ON NAMESPACE NAMES!
#
# You CANNOT give your namespace the same name as a Pulumi provider. As an
# example, you cannot call your AWS namespace "aws" or your Digital Ocean
# namespace "digitalocean". If you do, you will get undefined and bizarre errors
# back from Pulumi at runtime.
#
# Known verboten namespaces:
# - aws
# - digitalocean
# - gcp
# - linode
# - azure
#
# This list is subject to change and may not be complete; if you experience
# unexplained errors from a provider, check whether your namespace name collides
# with that provider's name.
#
# One important exception: if you are providing auth credentials for a given
# provider, you will likely be defining them in that provider's namespace. So,
# the Digital Ocean token does go in "digitalocean:token".
#
# So far 3 maintainers have fallen into this trap and spent hours trying to sort
# it out. Hopefully this note saves you from doing the same.
#
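# As a purely hypothetical illustration ("myproj" is an invented namespace name),
# project-specific settings go in a custom namespace, while provider credentials
# stay in the provider's namespace:
#
#   myproj:cluster_name: demo-cluster    # OK: custom project namespace
#   digitalocean:token: <your-token>     # OK: credential in the provider namespace
#   aws:cluster_name: demo-cluster       # WRONG: "aws" reused as a project namespace
#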
################################################################################
config:
############################################################################
# Bank of Sirius (Sample Application) Settings
############################################################################
# These parameters define the name of the database and the database credentials
# used by the Bank of Sirius ledger application.
#
# Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius
# deployment if no password is provided.
# sirius:ledger_pwd: Password # Required
sirius:ledger_admin: admin
sirius:ledger_db: postgresdb
# This optional parameter supplies a hostname for the Bank of Sirius Ingress
# controller. If not set, the FQDN of the LB is used.
#sirius:hostname: demo.example.com
# These parameters define the name of the database and the database credentials
# used by the Bank of Sirius accounts application.
#
# Note that the encrypted password is a required value; Pulumi will abort the Bank of Sirius
# deployment if no password is provided.
#sirius:accounts_pwd: Password # Required
sirius:accounts_admin: admin
sirius:accounts_db: postgresdb
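#
# As a sketch (the values shown are placeholders), the two required passwords can
# be stored as encrypted Pulumi secrets from the command line instead of being
# written into this file in plain text:
#
#   pulumi config set --secret sirius:ledger_pwd 'example-ledger-password'
#   pulumi config set --secret sirius:accounts_pwd 'example-accounts-password'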
# Prometheus Configuration
# Chart version for the helm chart for prometheus
sirius:chart_version: 2.3.5
# Name of the repo to pull the prometheus chart from
sirius:helm_repo_name: prometheus-community
# URL of the chart repo to pull the prometheus chart from
sirius:helm_repo_url: https://prometheus-community.github.io/helm-charts
############################################################################
# AWS Access Settings
############################################################################
# If you have an AWS profile defined, you can configure it here instead of using an environment variable.
aws:profile: myProfile
# Your AWS region can be configured here instead of using an environment variable.
aws:region: us-east-1
############################################################################
# K8 Infrastructure Settings
############################################################################
# NOTE: This configuration variable selects the infrastructure to deploy to; the
# currently valid values are shown in the examples below (AWS, kubeconfig, or DO).
# If not set, this value will be requested during the run of start.sh and will be
# written to this file.
#
# kubernetes:infra_type: AWS
# kubernetes:infra_type: kubeconfig
# kubernetes:infra_type: DO
#
# NOTE: For kubernetes installations that use a kubeconfig file, three other
# variables need to be set: the fully qualified path to the kubeconfig file, the
# name of the cluster you will be using, and the name of the context you are
# using.
#
# kubernetes:kubeconfig: /full/path/to/kube/config
# kubernetes:cluster_name: name-of-cluster
# kubernetes:context_name: name-of-context
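#
# As a sketch (assuming kubectl is installed and the path above points at your
# kubeconfig file), the cluster and context names can be listed with standard
# kubectl commands:
#
#   kubectl config get-contexts --kubeconfig /full/path/to/kube/config
#   kubectl config current-context --kubeconfig /full/path/to/kube/config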
############################################################################
# AWS: VPC Settings
############################################################################
# An optional array of availability zones in which to create subnets can be
# specified here. The default is all of the availability zones associated
# with the configured region.
vpc:azs:
- us-east-1a
- us-east-1b
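#
# As a sketch (assuming the AWS CLI is installed and credentials are configured),
# the availability zones for the configured region can be listed with:
#
#   aws ec2 describe-availability-zones --region us-east-1 \
#     --query 'AvailabilityZones[].ZoneName' --output text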
############################################################################
# AWS: Elastic Kubernetes Service (EKS) Settings
############################################################################
# This is the Kubernetes version to install using EKS.
eks:k8s_version: 1.21
# This is the default instance type used by EKS.
eks:instance_type: t2.large
# The minimum number of compute instances to provision for the EKS cluster.
eks:min_size: 3
# The maximum number of compute instances to provision for the EKS cluster.
eks:max_size: 12
# The desired capacity of the EKS cluster.
eks:desired_capacity: 3
############################################################################
# NGINX Kubernetes Ingress Controller (KIC) Settings
############################################################################
# The following settings (kic-helm:*) define the properties of the helm chart
# used to deploy the ingress controller. Note, the version specified here is
# different from the version of the ingress controller itself. Rather, this
# is the version of the helm chart which describes *how* to deploy the ingress
# controller.
#
# NOTE: If you choose to deploy onto a cluster defined by a kubeconfig file, the
# only currently supported deployment method is to use a JWT to deploy an
# NGINX Plus IC. This is a temporary configuration that will be updated to
# fully support all of the deployment methods that the AWS infrastructure
# supports.
#
# NGINX Kubernetes Ingress Controller (KIC) Configuration
# Chart name for the helm chart for kic
kic-helm:chart_name: nginx-ingress
# Chart version for the helm chart for kic
kic-helm:chart_version: 0.15.2
# Name of the repo to pull the kic chart from
kic-helm:helm_repo_name: nginx-stable
# URL of the chart repo to pull kic from
kic-helm:helm_repo_url: https://helm.nginx.com/stable
# Timeout value for helm operations in seconds
kic-helm:helm_timeout: 300
# When set to true, the ingress controller Helm chart will deploy with
# NGINX plus support enabled.
kic-helm:enable_plus: false
# Acceptable values for kic:image_origin are:
# source   - build the KIC image from source code
# registry - download and use an NGINX Ingress Controller container image from a public registry
kic:image_origin: registry
# Full repository and tag path for ingress controller image
# For example, if you have a subscription to NGINX Plus Ingress Controller
# and have configured the Docker client certificates such that you can
# access the NGINX Docker registry, you can specify the NGINX Plus
# image name here. See this page for configuration documentation:
# https://docs.nginx.com/nginx-ingress-controller/installation/pulling-ingress-controller-image/
#
# The following are all valid image names:
# kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.4.2
# kic:image_name: private-registry.nginx.com/nginx-ic/nginx-plus-ingress:2.4.2-ot
# kic:image_name: docker.io/nginx/nginx-ingress:2.4.2
# kic:image_name: nginx/nginx-ingress:2.4.2
# kic:image_name: nginx/nginx-ingress:2.4.2-alpine
kic:image_name: nginx/nginx-ingress:2.4.2
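#
# As a sketch of the client-certificate setup described in the page linked above
# (verify the exact paths against that documentation), the Docker client on a
# Linux host is typically configured like this:
#
#   sudo mkdir -p /etc/docker/certs.d/private-registry.nginx.com
#   sudo cp nginx-repo.crt /etc/docker/certs.d/private-registry.nginx.com/client.cert
#   sudo cp nginx-repo.key /etc/docker/certs.d/private-registry.nginx.com/client.key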
############################################################################
# Options for building from oss_image (WIP)
############################################################################
############################################################################
# Options for building from source
############################################################################
# This parameter informs the image creation build script what type of
# Docker image to build. You will need to check the source code to know
# exactly what targets are available. As of 1.11.2, the following make
# targets are available:
# debian-image (default)
# alpine-image
# debian-image-plus
# debian-image-nap-plus
# openshift-image
# openshift-image-plus
# openshift-image-nap-plus
# debian-image-opentracing
# debian-image-opentracing-plus
kic:make_target: debian-image
# By default the latest version of the NGINX Kubernetes Ingress Controller
# source code will be downloaded and built unless an alternative URL is
# provided for the kic:src_url parameter. To use the default, just omit this key.
# URLs can point to a directory on the local filesystem, to a tar.gz archive, or
# to a git repository. For a git repository URL, specify a tag/commit/branch in
# the URL fragment.
#
# Example URLs:
#
# HTTP/HTTPS url pointing to a tar.gz archive:
# https://github.com/nginxinc/kubernetes-ingress/archive/refs/tags/v1.11.3.tar.gz
#
# tar.gz archive on the local filesystem:
# file:///var/tmp/v1.11.3.tar.gz
# /var/tmp/v1.11.3.tar.gz
#
# Directory containing the source tree on the local filesystem:
# file:///var/tmp/kubernetes-ingress-1.11.3
# /var/tmp/kubernetes-ingress-1.11.3
#
# GitHub URL without a tag specified:
# https://github.com/nginxinc/kubernetes-ingress.git
# git@github.com:nginxinc/kubernetes-ingress.git
# ssh://git@github.com/nginxinc/kubernetes-ingress.git
#
# GitHub URL with a tag specified:
# https://github.com/nginxinc/kubernetes-ingress.git#v2.2.0
# git@github.com:nginxinc/kubernetes-ingress.git#v2.2.0
# ssh://git@github.com/nginxinc/kubernetes-ingress.git#v2.2.0
kic:src_url: https://github.com/nginxinc/kubernetes-ingress.git#v2.2.0
# When set to true, Pulumi's diff logic is circumvented and the image will always
# be rebuilt, regardless of whether the inputs to Pulumi have changed.
kic:always_rebuild: false
# When the block below is defined and the make_target is set to an NGINX Plus
# image target, an NGINX Plus image will be built.
kic:nginx_plus:
# Path on the executing system's filesystem of the NGINX repository key.
kic:key_path: /etc/ssl/nginx/nginx-repo.key
# Path on the executing system's filesystem of the NGINX repository certificate.
kic:cert_path: /etc/ssl/nginx/nginx-repo.crt
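#
# As an illustrative (unverified) combination, building an NGINX Plus image from
# source ties the settings in this section together roughly as follows:
#
#   kic:image_origin: source
#   kic:make_target: debian-image-plus
#   kic:src_url: https://github.com/nginxinc/kubernetes-ingress.git#v2.2.0
#   kic:nginx_plus:
#   kic:key_path: /etc/ssl/nginx/nginx-repo.key
#   kic:cert_path: /etc/ssl/nginx/nginx-repo.crt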
############################################################################
# Logagent Deployment (Filebeat)
############################################################################
# Logagent Configuration
# Chart name for the helm chart for the logagent
logagent:chart_name: filebeat
# Chart version for the helm chart for the logagent
logagent:chart_version: 7.17.3
# Name of the repo to pull the logagent from
logagent:helm_repo_name: elastic
# URL of the chart repo to pull the logagent from
logagent:helm_repo_url: https://helm.elastic.co
# Timeout value for helm operations in seconds
logagent:helm_timeout: 300
############################################################################
# Logstore Deployment (Elasticsearch)
############################################################################
# Logstore Configuration
# Chart name for the helm chart for the logstore
logstore:chart_name: elasticsearch
# Chart version for the helm chart for the logstore
logstore:chart_version: 19.4.4
# Name of the repo to pull the logstore from
logstore:helm_repo_name: bitnami
# URL of the chart repo to pull the logstore from
logstore:helm_repo_url: https://charts.bitnami.com/bitnami
# Timeout value for helm operations in seconds
logstore:helm_timeout: 300
# NOTE: Because of the wide variety of kubernetes clusters that we deploy to,
# we have provided the ability to tune the ELK stack appropriately. By
# default we deploy 1 each of the ingest, data, master, and coordinating
# nodes. This is sufficient for basic POC testing but will most likely
# fall over under heavy usage.
# Number of master replicas to deploy
logstore:master_replicas: 3
# Number of data replicas to deploy
logstore:data_replicas: 3
# Number of ingest replicas to deploy
logstore:ingest_replicas: 2
# Number of coordinating replicas to deploy
logstore:coordinating_replicas: 2
############################################################################
# Certificate Management (cert-manager) Deployment
############################################################################
# Cert Manager Configuration
# Chart name for the helm chart for certmanager
certmgr:chart_name: cert-manager
# Chart version for the helm chart for certmanager
certmgr:chart_version: v1.10.0
# Name of the repo to pull the certmanager chart from
certmgr:certmgr_helm_repo_name: jetstack
# URL of the chart repo to pull certmanager from
certmgr:certmgr_helm_repo_url: https://charts.jetstack.io
# Timeout value for helm operations in seconds
certmgr:helm_timeout: 300
############################################################################
# Prometheus Deployment (kube-prometheus-stack)
############################################################################
# Prometheus Configuration
# Chart name for the helm chart for prometheus
prometheus:chart_name: kube-prometheus-stack
# Chart version for the helm chart for prometheus
prometheus:chart_version: 41.5.0
# Name of the repo to pull the prometheus chart from
prometheus:helm_repo_name: prometheus-community
# URL of the chart repo to pull the prometheus chart from
prometheus:helm_repo_url: https://prometheus-community.github.io/helm-charts
# Name of the statsd chart (uses the same repo as the prometheus chart)
prometheus:statsd_chart_name: prometheus-statsd-exporter
# Version of the statsd chart (uses the same repo as the prometheus chart)
prometheus:statsd_chart_version: 0.6.2
# Timeout value for helm operations in seconds
prometheus:helm_timeout: 300
#
# Grafana Configuration
# NOTE: This is the admin password for the Grafana installation. Grafana is not
# exposed to the internet and requires kubeproxy to access. Ideally this value
# would be encrypted, but that depends on Pulumi adding support for hierarchical
# configuration files. See the anthos subdirectory for details.
prometheus:adminpass: strongpass
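#
# As a sketch only (the exact service name and namespace depend on how the chart
# was deployed), Grafana can typically be reached with a kubectl port-forward:
#
#   kubectl port-forward -n <namespace> svc/<grafana-service> 3000:80
#   # ...then browse to http://localhost:3000 and log in with the admin user
#   # and the password configured above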
############################################################################
# Bank of Sirius Configuration
############################################################################
# NOTE: The parameters for the Bank of Sirius are set in a configuration directory
# within that project.
############################################################################
# Digital Ocean Managed Kubernetes and Container Registry
############################################################################
# This is the Kubernetes version to install using Digital Ocean K8s.
docean:k8s_version: 1.22.12-do.0
# This is the default instance type used by Digital Ocean K8s.
docean:instance_size: s-4vcpu-8gb
# The desired node count of the Digital Ocean K8s cluster.
docean:node_count: 3
# The region to deploy the cluster
docean:region: sfo3
# Subscription tier for container registry
digitalocean:container_registry_subscription_tier: starter
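#
# As a sketch (assuming the doctl CLI is installed and authenticated), the valid
# Kubernetes version slugs, instance sizes, and regions can be listed with:
#
#   doctl kubernetes options versions
#   doctl kubernetes options sizes
#   doctl kubernetes options regions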
############################################################################
# Linode Kubernetes Engine
############################################################################
# This is the Kubernetes version to install using Linode K8s.
linode:k8s_version: 1.23
# This is the default instance type used by Linode Kubernetes.
linode:instance_type: g6-standard-8
# The desired node count of the Linode K8s cluster.
linode:node_count: 3
# The region to deploy the cluster
linode:region: us-central
# Flag to enable or disable HA mode for the Kubernetes cluster
linode:k8s_ha: true