#!/usr/bin/env bash
set -o errexit # abort on nonzero exit status
set -o nounset # abort on unbound variable
set -o pipefail # don't hide errors within pipes
# Don't pollute console output with upgrade notifications
export PULUMI_SKIP_UPDATE_CHECK=true
# Run Pulumi non-interactively
export PULUMI_SKIP_CONFIRMATIONS=true
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
echo " "
echo "IMPORTANT NOTICE!"
echo "===================================================================================================="
echo " This script and the associated Pulumi projects are deprecated and will be removed in a future "
echo " release as they are outside of the scope of the MARA project."
echo " "
echo " The MARA team no longer tests or updates these scripts, so please review before running if you "
echo " decide that you want to use them."
echo " "
echo " For more information, please see Discussion #155 in the repository (nginx.com/mara)"
echo "===================================================================================================="
sleep 5
# Check to see if the venv has been installed, since this is only going to be used to start pulumi/python based
# projects.
#
if ! command -v "${script_dir}/../pulumi/python/venv/bin/python" >/dev/null; then
echo "NOTICE! Unable to find the vnev directory. This is required for the pulumi/python deployment process."
echo "Please run ./setup_venv.sh from this directory to install the required virtual environment."
echo " "
exit 1
else
echo "Adding to [${script_dir}/venv/bin] to PATH"
export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
fi
if ! command -v pulumi >/dev/null; then
if [ -x "${script_dir}/../pulumi/python/venv/bin/pulumi" ]; then
echo "Adding to [${script_dir}/venv/bin] to PATH"
export PATH="${script_dir}/../pulumi/python/venv/bin:$PATH"
if ! command -v pulumi >/dev/null; then
echo >&2 "Pulumi must be installed to continue"
exit 1
fi
else
echo >&2 "Pulumi must be installed to continue"
exit 1
fi
fi
function retry() {
local -r -i max_attempts="$1"
shift
local -i attempt_num=1
until "$@"; do
if ((attempt_num == max_attempts)); then
echo "Attempt ${attempt_num} failed and there are no more attempts left!"
return 1
else
echo "Attempt ${attempt_num} failed! Trying again in $attempt_num seconds..."
sleep $((attempt_num++))
fi
done
}
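# Illustrative usage of retry() (comment only, not executed): run the given command up to N times,
# sleeping an increasing number of seconds between attempts. The kubeconfig path below is an
# example value, not one used by this script:
#   retry 5 kubectl --kubeconfig="${HOME}/.kube/config" get nodes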
script_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" >/dev/null 2>&1 && pwd)"
echo " "
echo "NOTICE! The stack name provided here should be different from the stack name you use for your main"
echo "deployment. This stack is only used to stand up missing features into your kubernetes cluster, and "
echo "these features should not be added/removed as part of testing."
echo " "
echo "It is recommended you use the name of your cluster for this stack name, but any unique name will work."
echo " "
echo "Because of this, there is not a convenience script to remove these features. However, you can remove "
echo "them manually using the _pulumi destroy_ command."
echo " "
# Sleep so that this is seen...
sleep 5
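# Since there is no convenience teardown script, one way to remove these features later
# (illustrative sketch only, assuming the stack name entered below) is to run "pulumi destroy"
# in each tool project directory, for example:
#   (cd "${script_dir}/../pulumi/python/tools/metallb" && pulumi destroy -s "${PULUMI_STACK}")
#   (cd "${script_dir}/../pulumi/python/tools/nfsvolumes" && pulumi destroy -s "${PULUMI_STACK}")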
if [ ! -f "${script_dir}/../pulumi/python/tools/common/config/environment" ]; then
touch "${script_dir}/../pulumi/python/tools/common/config/environment"
fi
if ! grep --quiet '^PULUMI_STACK=.*' "${script_dir}/../pulumi/python/tools/common/config/environment"; then
read -r -e -p "Enter the name of the Pulumi stack to use in tool installation: " PULUMI_STACK
echo "PULUMI_STACK=${PULUMI_STACK}" >>"${script_dir}/../pulumi/python/tools/common/config/environment"
fi
source "${script_dir}/../pulumi/python/tools/common/config/environment"
echo "Configuring all tool installations to use the stack: ${PULUMI_STACK}"
# Create the stack if it does not already exist
find "${script_dir}/../pulumi/python/tools" -mindepth 2 -maxdepth 2 -type f -name Pulumi.yaml -execdir pulumi stack select --create "${PULUMI_STACK}" \;
echo " "
echo "NOTICE! When using a kubeconfig file you need to ensure that your environment is configured to"
echo "connect to Kubernetes properly. If you have multiple kubernetes contexts (or custom contexts)"
echo "you may need to remove them and replace them with a simple ~/.kube/config file. This will be "
echo "addressed in a future release."
echo " "
echo "This value is used solely for the installation of the extra tools and is not persisted to the main"
echo "configuration file."
echo " "
# Sleep so that this is seen...
sleep 5
if pulumi config get kubernetes:kubeconfig -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1; then
echo "Kubeconfig file found"
else
echo "Provide an absolute path to your kubeconfig file"
pulumi config set kubernetes:kubeconfig -C "${script_dir}/../pulumi/python/tools/common"
fi
# Clustername
if pulumi config get kubernetes:cluster_name -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1; then
echo "Cluster name found"
else
echo "Provide your cluster name"
pulumi config set kubernetes:cluster_name -C "${script_dir}/../pulumi/python/tools/common"
fi
# Contextname
# TODO: Update process to use context name as well as kubeconfig and clustername #84
if pulumi config get kubernetes:context_name -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1; then
echo "Context name found"
else
echo "Provide your context name"
pulumi config set kubernetes:context_name -C "${script_dir}/../pulumi/python/tools/common"
fi
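# These values can also be set non-interactively ahead of time; the values shown here are
# illustrative only and should be replaced with ones that match your environment:
#   pulumi config set kubernetes:kubeconfig "${HOME}/.kube/config" -C "${script_dir}/../pulumi/python/tools/common"
#   pulumi config set kubernetes:cluster_name my-cluster -C "${script_dir}/../pulumi/python/tools/common"
#   pulumi config set kubernetes:context_name my-context -C "${script_dir}/../pulumi/python/tools/common"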
# Set our variables
kubeconfig="$(pulumi config get kubernetes:kubeconfig -C ${script_dir}/../pulumi/python/tools/common)"
cluster_name="$(pulumi config get kubernetes:cluster_name -C ${script_dir}/../pulumi/python/tools/common)"
context_name="$(pulumi config get kubernetes:context_name -C ${script_dir}/../pulumi/python/tools/common)"
# Show our config...based on the kubeconfig file
if command -v kubectl >/dev/null; then
echo "Attempting to connect to kubernetes cluster"
retry 30 kubectl --kubeconfig="${kubeconfig}" config view
fi
# Connect to the cluster
if command -v kubectl >/dev/null; then
echo "Attempting to connect to kubernetes cluster"
retry 30 kubectl --kubeconfig="${kubeconfig}" --cluster="${cluster_name}" --context="${context_name}" version >/dev/null
fi
echo " "
echo "For installations that are lacking persistent volume support or egress support this script will help the user install"
echo "the necessary packages. Note that this is not the only way to do this, and also be aware that this may not be the best"
echo "solution for your environment. Ideally, your kubernetes installation already has these features enabled/installed."
echo " "
# Sleep so we are seen
sleep 5
while true; do
read -r -e -p "Do you wish to install metallb? " yn
case $yn in
[Yy]*)
echo "Checking for necessary values in the configuration:"
pulumi config set metallb:enabled -C "${script_dir}/../pulumi/python/tools/common" enabled >/dev/null 2>&1
if pulumi config get metallb:thecidr -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1; then
echo "CIDR found"
else
echo "Provide your CIDR (Note: no validation is done on this data)"
pulumi config set metallb:thecidr -C "${script_dir}/../pulumi/python/tools/common"
fi
break
;;
[Nn]*) # If they don't want metallb but values are already set, delete them
pulumi config rm metallb:thecidr -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1
pulumi config rm metallb:enabled -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1
break
;;
*) echo "Please answer yes or no." ;;
esac
done
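# The metallb:thecidr value is passed through exactly as entered with no validation. MetalLB address
# pools are typically a CIDR such as 192.168.100.0/28 or a range such as 192.168.100.100-192.168.100.150;
# these are illustrative values only, so confirm against your network and the metallb project configuration.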
while true; do
read -r -e -p "Do you wish to install nfs client support for persistent volumes? " yn
case $yn in
[Yy]*)
echo "Checking for necessary values in the configuration:"
pulumi config set nfsvols:enabled -C "${script_dir}/../pulumi/python/tools/common" enabled >/dev/null 2>&1
if pulumi config get nfsvols:nfsserver -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1; then
echo "NFS Server IP found"
else
echo "Provide your NFS Server IP (Note: no validation is done on this data)"
pulumi config set nfsvols:nfsserver -C "${script_dir}/../pulumi/python/tools/common"
fi
if pulumi config get nfsvols:nfspath -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1; then
echo "NFS Share Path found"
else
echo "Provide your NFS Share Path (Note: no validation is done on this data)"
pulumi config set nfsvols:nfspath -C "${script_dir}/../pulumi/python/tools/common"
fi
break
;;
[Nn]*) # If they don't want nfsvols but values are already set, delete them
pulumi config rm nfsvols:nfsserver -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1
pulumi config rm nfsvols:nfspath -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1
pulumi config rm nfsvols:enabled -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1
break
;;
*) echo "Please answer yes or no." ;;
esac
done
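# The nfsvols values are also passed through exactly as entered. Illustrative examples only (confirm
# against your NFS server): nfsvols:nfsserver might be 192.168.100.5 and nfsvols:nfspath might be
# /srv/nfs/kubernetes.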
pulumi_args="--emoji "
if pulumi config get metallb:enabled -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1; then
echo "====================="
echo "| MetalLB |"
echo "====================="
cd "${script_dir}/../pulumi/python/tools/metallb"
pulumi $pulumi_args up
fi
if pulumi config get nfsvols:enabled -C "${script_dir}/../pulumi/python/tools/common" >/dev/null 2>&1; then
echo "====================="
echo "| NFSVols |"
echo "====================="
cd "${script_dir}/../pulumi/python/tools/nfsvolumes"
pulumi $pulumi_args up
fi