@@ -1,16 +1,60 @@
import json
+ import requests

import pytest
- from pytest_bdd import given, scenario, then
+ from pytest_bdd import given, scenario, when, then, parsers

- from tests import utils
+ from tests import utils, kube_utils


@scenario("../features/network.feature", "All expected listening processes")
def test_all_listening_processes(host):
    pass


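+ # The scenarios bound below live in network.feature; a hedged sketch of the
+ # first one, composed from the step strings defined in this file ('my-svc' is
+ # an illustrative service name, and the 200 status is an assumption):
+ #
+ #   Scenario: Access using NodePort on workload-plane IP
+ #     Given we run on an untainted single node
+ #     When we create a 'my-svc' NodePort service that expose a simple pod
+ #     Then a request on the 'my-svc' NodePort on a workload-plane IP returns 200
+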
+ @scenario("../features/network.feature", "Access using NodePort on workload-plane IP")
+ def test_access_nodeport_wp(host, teardown):
+     pass
+
+
+ @scenario("../features/network.feature", "Access using NodePort on control-plane IP")
+ def test_access_nodeport_cp(host, teardown):
+     pass
+
+
+ @scenario("../features/network.feature", "Expose NodePort on Control Plane")
+ def test_change_nodeport_cidrs(host, teardown):
+     pass
+
+
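+ # Per-scenario dict shared between steps: steps record what they create or
+ # reconfigure here so the `teardown` fixture below knows what to undo.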
+ @pytest.fixture(scope="function")
+ def context():
+     return {}
+
+
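+ # Runs after each scenario: delete the Pod/Service pairs registered under
+ # "svc_to_delete", restore /etc/metalk8s/bootstrap.yaml if it was patched,
+ # and re-apply kube-proxy if the NodePort CIDRs were changed.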
+ @pytest.fixture
+ def teardown(context, host, ssh_config, version, k8s_client):
+     yield
+     for svc_name in context.get("svc_to_delete", []):
+         k8s_client.resources.get(api_version="v1", kind="Pod").delete(
+             name=f"{svc_name}-pod", namespace="default"
+         )
+         k8s_client.resources.get(api_version="v1", kind="Service").delete(
+             name=svc_name, namespace="default"
+         )
+
+     if "bootstrap_to_restore" in context:
+         with host.sudo():
+             host.check_output(
+                 "cp {} /etc/metalk8s/bootstrap.yaml".format(
+                     context["bootstrap_to_restore"]
+                 )
+             )
+
+     if context.get("reconfigure_nodeport"):
+         re_configure_nodeport(host, version, ssh_config)
+
+
@given("we run on an untainted single node")
def running_on_single_node_untainted(k8s_client):
    nodes = k8s_client.resources.get(api_version="v1", kind="Node").get()
@@ -21,6 +65,67 @@ def running_on_single_node_untainted(k8s_client):
    assert not nodes.items[0].spec.taints, "Single node should be untainted"


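+ # Starts a pod serving HTTP on port 8080 (Python's built-in http.server) and
+ # exposes it through a NodePort Service. `utils_manifest` is assumed to be a
+ # fixture providing a base Pod manifest that this step customizes.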
+ @when(
+     parsers.parse("we create a '{svc_name}' NodePort service that expose a simple pod")
+ )
+ def create_nodeport_svc(context, k8s_client, utils_manifest, svc_name):
+     utils_manifest["metadata"]["name"] = f"{svc_name}-pod"
+     utils_manifest["metadata"]["labels"] = {"app": f"{svc_name}-app"}
+     utils_manifest["spec"]["containers"][0]["command"] = [
+         "python3",
+         "-m",
+         "http.server",
+         "8080",
+     ]
+
+     svc_manifest = {
+         "apiVersion": "v1",
+         "kind": "Service",
+         "metadata": {"name": svc_name},
+         "spec": {
+             "type": "NodePort",
+             "selector": {"app": f"{svc_name}-app"},
+             "ports": [{"port": 8080}],
+         },
+     }
+
+     # Register the Service for cleanup before creating anything, so teardown
+     # still runs even if creation fails halfway through.
+     context.setdefault("svc_to_delete", []).append(svc_name)
+
+     k8s_client.resources.get(api_version="v1", kind="Pod").create(
+         body=utils_manifest, namespace="default"
+     )
+     k8s_client.resources.get(api_version="v1", kind="Service").create(
+         body=svc_manifest, namespace="default"
+     )
+
+     utils.retry(
+         kube_utils.check_pod_status(
+             k8s_client,
+             f"{svc_name}-pod",
+             namespace="default",
+             state="Running",
+         ),
+         times=10,
+         wait=3,
+         name=f"wait for Pod 'default/{svc_name}-pod'",
+     )
+
+
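+ # Reads the requested plane's CIDR(s) from the Salt pillar and patches the
+ # bootstrap config so kube-proxy only serves NodePorts on that network.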
+ @when(parsers.parse("we set nodeport CIDRs to {plane} CIDR"))
+ def update_nodeport_cidr(host, context, ssh_config, version, plane):
+     pillar = {
+         "workload-plane": "networks:workload_plane:cidr",
+         "control-plane": "networks:control_plane:cidr",
+     }
+
+     new_cidrs = utils.get_pillar(host, pillar[plane])
+
+     bootstrap_patch = {"networks": {"nodeport": {"cidr": new_cidrs}}}
+
+     utils.patch_bootstrap_config(context, host, bootstrap_patch)
+     re_configure_nodeport(host, version, ssh_config, context=context)
+
+
@then("ports check succeed")
def check_ports(host, ssh_config):
    with host.sudo():
@@ -121,3 +226,62 @@ def check_all_listening_process(host, version, control_plane_ingress_ip):
        )

    assert not errors, "\n".join(errors)
+
+
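+ # NodePort reachability assertions: both steps issue an HTTP GET against the
+ # Service's allocated nodePort on the requested plane's node IP.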
+ @then(
+     parsers.parse(
+         "a request on the '{svc_name}' NodePort on a {plane} IP returns {status_code}"
+     )
+ )
+ def nodeport_service_request_return(k8s_client, host, svc_name, plane, status_code):
+     response = do_nodeport_service_request(k8s_client, host, plane, svc_name)
+     assert response is not None
+     assert response.status_code == int(status_code)
+
+
+ @then(
+     parsers.parse(
+         "a request on the '{svc_name}' NodePort on a {plane} IP should not return"
+     )
+ )
+ def nodeport_service_request_does_not_respond(k8s_client, host, svc_name, plane):
+     # Only catch request failures: a bare `except:` would also swallow the
+     # AssertionError below and make this step pass unconditionally.
+     try:
+         response = do_nodeport_service_request(k8s_client, host, plane, svc_name)
+     except requests.exceptions.RequestException:
+         return
+     assert (
+         False
+     ), f"Server should not answer but got {response.status_code}: {response.reason}"
+
+
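+ # Helper: resolve the node IP for the requested plane from Salt grains, look
+ # up the Service's allocated nodePort, and issue a plain HTTP GET.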
+ def do_nodeport_service_request(k8s_client, host, plane, svc_name):
+     grains = {
+         "workload-plane": "metalk8s:workload_plane_ip",
+         "control-plane": "metalk8s:control_plane_ip",
+     }
+
+     if plane not in grains:
+         raise NotImplementedError(f"Unknown plane: '{plane}'")
+
+     ip = utils.get_grain(host, grains[plane])
+
+     svc = k8s_client.resources.get(api_version="v1", kind="Service").get(
+         name=svc_name, namespace="default"
+     )
+     port = svc["spec"]["ports"][0]["nodePort"]
+
+     return requests.get(f"http://{ip}:{port}")
+
+
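+ # Re-applies the kube-proxy Salt state so new NodePort CIDRs take effect; when
+ # a context is passed, flag it so teardown re-applies the state again after
+ # the original bootstrap config has been restored.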
+ def re_configure_nodeport(host, version, ssh_config, context=None):
+     command = [
+         "salt-run",
+         "state.sls",
+         "metalk8s.kubernetes.kube-proxy.deployed",
+         f"saltenv=metalk8s-{version}",
+     ]
+
+     utils.run_salt_command(host, command, ssh_config)
+
+     if context is not None:
+         context["reconfigure_nodeport"] = True