
Commit c399fe3

tests: Add some tests about NodePort services
Parent: 8f7b261

File tree: 5 files changed, +223 −39 lines

tests/post/features/network.feature

Lines changed: 20 additions & 0 deletions

@@ -5,3 +5,23 @@ Feature: Network
     And we run on an untainted single node
     Then ports check succeed
     And we have only expected processes listening
+
+  Scenario: Access using NodePort on workload-plane IP
+    Given the Kubernetes API is available
+    When we create a 'test-svc-1' NodePort service that expose a simple pod
+    Then a request on the 'test-svc-1' NodePort on a workload-plane IP returns 200
+
+  Scenario: Access using NodePort on control-plane IP
+    Given the Kubernetes API is available
+    And the node control-plane IP is not equal to its workload-plane IP
+    When we create a 'test-svc-2' NodePort service that expose a simple pod
+    Then a request on the 'test-svc-2' NodePort on a control-plane IP should not return
+
+  Scenario: Expose NodePort on Control Plane
+    Given the Kubernetes API is available
+    And the node control-plane IP is not equal to its workload-plane IP
+    When we create a 'test-svc-3' NodePort service that expose a simple pod
+    And we set nodeport CIDRs to control-plane CIDR
+    And we wait for the rollout of 'daemonset/kube-proxy' in namespace 'kube-system' to complete
+    Then a request on the 'test-svc-3' NodePort on a control-plane IP returns 200
+    And a request on the 'test-svc-3' NodePort on a workload-plane IP should not return
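
These scenarios are bound to Python step functions through pytest-bdd, as the test_network.py hunk below shows. As a quick orientation for readers new to that pattern, here is a minimal standalone sketch, with a hypothetical feature path and step wording (not code from this commit), of how a scenario and a parsed step fit together:

# Illustrative only: a toy feature file "features/example.feature" is assumed
# to contain a scenario named "Access a service through its NodePort".
from pytest_bdd import scenario, when, then, parsers


@scenario("features/example.feature", "Access a service through its NodePort")
def test_nodeport_access():
    # Empty body: pytest-bdd runs the Given/When/Then steps bound below.
    pass


@when(parsers.parse("we create a '{svc_name}' NodePort service"))
def create_service(svc_name):
    # A real step would create the Service; svc_name is parsed from the step text.
    print(f"creating {svc_name}")


@then(parsers.parse("a request on the '{svc_name}' NodePort returns {status_code:d}"))
def check_response(svc_name, status_code):
    # A real step would issue the HTTP request; status_code is parsed as an int.
    assert status_code == 200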

tests/post/steps/conftest.py

Lines changed: 11 additions & 0 deletions

@@ -123,6 +123,17 @@ def check_multi_node(k8s_client):
         pytest.skip("We skip single node cluster for this test")
 
 
+@given("the node control-plane IP is not equal to its workload-plane IP")
+def node_control_plane_ip_is_not_equal_to_its_workload_plane_ip(host):
+    data = utils.get_grain(host, "metalk8s")
+
+    assert "control_plane_ip" in data
+    assert "workload_plane_ip" in data
+
+    if data["control_plane_ip"] == data["workload_plane_ip"]:
+        pytest.skip("Node control-plane IP is equal to node workload-plane IP")
+
+
 # }}}
 
 # Then {{{
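
The new Given step inspects the metalk8s Salt grain and skips the scenario when both planes share one IP. A hedged illustration of the dict shape utils.get_grain(host, "metalk8s") is expected to return here; the addresses are made up, and only the two asserted keys are assumed:

# Hypothetical return value of utils.get_grain(host, "metalk8s") on a node with
# separate control-plane and workload-plane networks (addresses are made up).
example_metalk8s_grain = {
    "control_plane_ip": "192.168.1.100",
    "workload_plane_ip": "192.168.2.100",
}

# When the two values are equal, the step above calls pytest.skip(), so the
# control-plane NodePort scenarios only run on hosts with distinct networks.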

tests/post/steps/test_ingress.py

Lines changed: 4 additions & 37 deletions

@@ -1,4 +1,3 @@
-import json
 import os
 import re
 import requests

@@ -109,17 +108,6 @@ def teardown(context, host, ssh_config, version, k8s_client):
     re_configure_portmap(host, version, ssh_config)
 
 
-@given("the node control-plane IP is not equal to its workload-plane IP")
-def node_control_plane_ip_is_not_equal_to_its_workload_plane_ip(host):
-    data = utils.get_grain(host, "metalk8s")
-
-    assert "control_plane_ip" in data
-    assert "workload_plane_ip" in data
-
-    if data["control_plane_ip"] == data["workload_plane_ip"]:
-        pytest.skip("Node control-plane IP is equal to node workload-plane IP")
-
-
 @given("a VIP for Control Plane Ingress is available")
 def we_have_a_vip(context):
     cp_ingress_vip = os.environ.get("CONTROL_PLANE_INGRESS_VIP")

@@ -147,7 +135,7 @@ def disable_metallb(host, context, ssh_config, version):
         "networks": {"controlPlane": {"metalLB": {"enabled": False}, "ingress": {}}}
     }
 
-    patch_bootstrap_config(context, host, bootstrap_patch)
+    utils.patch_bootstrap_config(context, host, bootstrap_patch)
     re_configure_cp_ingress(host, version, ssh_config, context=context)
 
 

@@ -259,7 +247,7 @@ def update_cp_ingress_ip(host, context, ssh_config, version, node_name):
 
     bootstrap_patch = {"networks": {"controlPlane": {"ingress": {"ip": ip}}}}
 
-    patch_bootstrap_config(context, host, bootstrap_patch)
+    utils.patch_bootstrap_config(context, host, bootstrap_patch)
     re_configure_cp_ingress(host, version, ssh_config, context=context)
 
 

@@ -273,7 +261,7 @@ def update_control_plane_ingress_ip(host, context, ssh_config, version, ip):
         }
     }
 
-    patch_bootstrap_config(context, host, bootstrap_patch)
+    utils.patch_bootstrap_config(context, host, bootstrap_patch)
     re_configure_cp_ingress(host, version, ssh_config, context=context)
 
 

@@ -288,7 +276,7 @@ def update_portmap_cidr(host, context, ssh_config, version, plane):
 
     bootstrap_patch = {"networks": {"portmap": {"cidr": new_cidrs}}}
 
-    patch_bootstrap_config(context, host, bootstrap_patch)
+    utils.patch_bootstrap_config(context, host, bootstrap_patch)
     re_configure_portmap(host, version, ssh_config, context=context)
 
 

@@ -404,27 +392,6 @@ def get_node_hosting_cp_ingress_vip(k8s_client):
     return match.group("node")
 
 
-def patch_bootstrap_config(context, host, patch):
-    if "bootstrap_to_restore" not in context:
-        with host.sudo():
-            cmd_ret = host.check_output("salt-call --out json --local temp.dir")
-
-        tmp_dir = json.loads(cmd_ret)["local"]
-
-        with host.sudo():
-            host.check_output("cp /etc/metalk8s/bootstrap.yaml {}".format(tmp_dir))
-
-        context["bootstrap_to_restore"] = os.path.join(tmp_dir, "bootstrap.yaml")
-
-    with host.sudo():
-        host.check_output(
-            "salt-call --local --retcode-passthrough state.single "
-            "file.serialize /etc/metalk8s/bootstrap.yaml "
-            "dataset='{}' "
-            "merge_if_exists=True".format(json.dumps(patch))
-        )
-
-
 def re_configure_cp_ingress(host, version, ssh_config, context=None):
     with host.sudo():
         host.check_output(

tests/post/steps/test_network.py

Lines changed: 166 additions & 2 deletions

@@ -1,16 +1,60 @@
 import json
+import requests
 
 import pytest
-from pytest_bdd import given, scenario, then
+from pytest_bdd import given, scenario, when, then, parsers
 
-from tests import utils
+from tests import utils, kube_utils
 
 
 @scenario("../features/network.feature", "All expected listening processes")
 def test_all_listening_processes(host):
     pass
 
 
+@scenario("../features/network.feature", "Access using NodePort on workload-plane IP")
+def test_access_nodeport_wp(host, teardown):
+    pass
+
+
+@scenario("../features/network.feature", "Access using NodePort on control-plane IP")
+def test_access_nodeport_cp(host, teardown):
+    pass
+
+
+@scenario("../features/network.feature", "Expose NodePort on Control Plane")
+def test_change_nodeport_cidrs(host, teardown):
+    pass
+
+
+@pytest.fixture(scope="function")
+def context():
+    return {}
+
+
+@pytest.fixture
+def teardown(context, host, ssh_config, version, k8s_client):
+    yield
+    for svc_name in context.get("svc_to_delete", []):
+        k8s_client.resources.get(api_version="v1", kind="Pod").delete(
+            name=f"{svc_name}-pod", namespace="default"
+        )
+        k8s_client.resources.get(api_version="v1", kind="Service").delete(
+            name=svc_name, namespace="default"
+        )
+
+    if "bootstrap_to_restore" in context:
+        with host.sudo():
+            host.check_output(
+                "cp {} /etc/metalk8s/bootstrap.yaml".format(
+                    context["bootstrap_to_restore"]
+                )
+            )
+
+    if context.get("reconfigure_nodeport"):
+        re_configure_nodeport(host, version, ssh_config)
+
+
 @given("we run on an untainted single node")
 def running_on_single_node_untainted(k8s_client):
     nodes = k8s_client.resources.get(api_version="v1", kind="Node").get()

@@ -21,6 +65,67 @@ def running_on_single_node_untainted(k8s_client):
     assert not nodes.items[0].spec.taints, "Single node should be untainted"
 
 
+@when(
+    parsers.parse("we create a '{svc_name}' NodePort service that expose a simple pod")
+)
+def create_nodeport_svc(context, k8s_client, utils_manifest, svc_name):
+    utils_manifest["metadata"]["name"] = f"{svc_name}-pod"
+    utils_manifest["metadata"]["labels"] = {"app": f"{svc_name}-app"}
+    utils_manifest["spec"]["containers"][0]["command"] = [
+        "python3",
+        "-m",
+        "http.server",
+        "8080",
+    ]
+
+    svc_manifest = {
+        "apiVersion": "v1",
+        "kind": "Service",
+        "metadata": {"name": svc_name},
+        "spec": {
+            "type": "NodePort",
+            "selector": {"app": f"{svc_name}-app"},
+            "ports": [{"port": 8080}],
+        },
+    }
+
+    context.setdefault("svc_to_delete", []).append(svc_name)
+
+    k8s_client.resources.get(api_version="v1", kind="Pod").create(
+        body=utils_manifest, namespace="default"
+    )
+    k8s_client.resources.get(api_version="v1", kind="Service").create(
+        body=svc_manifest, namespace="default"
+    )
+
+    utils.retry(
+        kube_utils.check_pod_status(
+            k8s_client,
+            f"{svc_name}-pod",
+            namespace="default",
+            state="Running",
+        ),
+        times=10,
+        wait=3,
+        name=f"wait for Pod 'default/{svc_name}-pod'",
+    )
+
+
+@when(parsers.parse("we set nodeport CIDRs to {plane} CIDR"))
+def update_nodeport_cidr(host, context, ssh_config, version, plane):
+    pillar = {
+        "workload-plane": "networks:workload_plane:cidr",
+        "control-plane": "networks:control_plane:cidr",
+    }
+
+    new_cidrs = utils.get_pillar(host, pillar[plane])
+
+    bootstrap_patch = {"networks": {"nodeport": {"cidr": new_cidrs}}}
+
+    utils.patch_bootstrap_config(context, host, bootstrap_patch)
+    re_configure_nodeport(host, version, ssh_config, context=context)
+
+
 @then("ports check succeed")
 def check_ports(host, ssh_config):
     with host.sudo():

@@ -121,3 +226,62 @@ def check_all_listening_process(host, version, control_plane_ingress_ip):
     )
 
     assert not errors, "\n".join(errors)
+
+
+@then(
+    parsers.parse(
+        "a request on the '{svc_name}' NodePort on a {plane} IP returns {status_code}"
+    )
+)
+def nodeport_service_request_return(k8s_client, host, svc_name, plane, status_code):
+    response = do_nodeport_service_request(k8s_client, host, plane, svc_name)
+    assert response is not None
+    assert response.status_code == int(status_code)
+
+
+@then(
+    parsers.parse(
+        "a request on the '{svc_name}' NodePort on a {plane} IP should not return"
+    )
+)
+def nodeport_service_request_does_not_respond(k8s_client, host, svc_name, plane):
+    try:
+        response = do_nodeport_service_request(k8s_client, host, plane, svc_name)
+    except requests.exceptions.RequestException:
+        return  # no answer from this IP, which is exactly what the step expects
+    assert (
+        False
+    ), f"Server should not answer but got {response.status_code}: {response.reason}"
+
+
+def do_nodeport_service_request(k8s_client, host, plane, svc_name):
+    grains = {
+        "workload-plane": "metalk8s:workload_plane_ip",
+        "control-plane": "metalk8s:control_plane_ip",
+    }
+
+    if plane not in grains:
+        raise NotImplementedError
+
+    ip = utils.get_grain(host, grains[plane])
+
+    svc = k8s_client.resources.get(api_version="v1", kind="Service").get(
+        name=svc_name, namespace="default"
+    )
+    port = svc["spec"]["ports"][0]["nodePort"]
+
+    return requests.get(f"http://{ip}:{port}")
+
+
+def re_configure_nodeport(host, version, ssh_config, context=None):
+    command = [
+        "salt-run",
+        "state.sls",
+        "metalk8s.kubernetes.kube-proxy.deployed",
+        f"saltenv=metalk8s-{version}",
+    ]
+
+    utils.run_salt_command(host, command, ssh_config)
+
+    if context is not None:
+        context["reconfigure_nodeport"] = True

tests/utils.py

Lines changed: 22 additions & 0 deletions

@@ -3,6 +3,7 @@
 import json
 import logging
 import re
+import os
 import operator
 import testinfra
 import time

@@ -189,6 +190,27 @@ def get_pillar(host, key, local=False):
     return json.loads(output)["local"]
 
 
+def patch_bootstrap_config(context, host, patch):
+    if "bootstrap_to_restore" not in context:
+        with host.sudo():
+            cmd_ret = host.check_output("salt-call --out json --local temp.dir")
+
+        tmp_dir = json.loads(cmd_ret)["local"]
+
+        with host.sudo():
+            host.check_output("cp /etc/metalk8s/bootstrap.yaml {}".format(tmp_dir))
+
+        context["bootstrap_to_restore"] = os.path.join(tmp_dir, "bootstrap.yaml")
+
+    with host.sudo():
+        host.check_output(
+            "salt-call --local --retcode-passthrough state.single "
+            "file.serialize /etc/metalk8s/bootstrap.yaml "
+            "dataset='{}' "
+            "merge_if_exists=True".format(json.dumps(patch))
+        )
+
+
 class BaseAPIError(Exception):
     """Some error occurred when using a `BaseAPI` subclass."""
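
patch_bootstrap_config merges a nested patch into /etc/metalk8s/bootstrap.yaml via salt-call file.serialize with merge_if_exists=True, backing the file up once per test context so a teardown can restore it. A hedged usage sketch; the host fixture and the CIDR value are placeholders:

# Hypothetical call site; `host` is the existing testinfra host fixture and the
# CIDR value is a placeholder, not a value used by the real tests.
from tests import utils


def example_patch(host):
    context = {}
    bootstrap_patch = {"networks": {"nodeport": {"cidr": "10.233.0.0/16"}}}

    # The first call backs up /etc/metalk8s/bootstrap.yaml into a temp dir and
    # records the copy under context["bootstrap_to_restore"].
    utils.patch_bootstrap_config(context, host, bootstrap_patch)

    # A teardown fixture can later restore it, e.g.:
    #     cp <context["bootstrap_to_restore"]> /etc/metalk8s/bootstrap.yaml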
