@@ -560,7 +560,7 @@ def compare_config():
 
         pg_patch_config["spec"]["patroni"]["slots"][slot_to_change]["database"] = "bar"
         del pg_patch_config["spec"]["patroni"]["slots"][slot_to_remove]
-
+
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             "acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster", pg_delete_slot_patch)
 
@@ -577,7 +577,7 @@ def compare_config():
 
         self.eventuallyEqual(lambda: self.query_database(leader.metadata.name, "postgres", get_slot_query % ("database", slot_to_change))[0], "bar",
             "The replication slot cannot be updated", 10, 5)
-
+
         # make sure slot from Patroni didn't get deleted
         self.eventuallyEqual(lambda: len(self.query_database(leader.metadata.name, "postgres", get_slot_query % ("slot_name", patroni_slot))), 1,
             "The replication slot from Patroni gets deleted", 10, 5)
@@ -933,7 +933,7 @@ def test_ignored_annotations(self):
                 },
             }
         }
-
+
         old_sts_creation_timestamp = sts.metadata.creation_timestamp
         k8s.api.apps_v1.patch_namespaced_stateful_set(sts.metadata.name, sts.metadata.namespace, annotation_patch)
         old_svc_creation_timestamp = svc.metadata.creation_timestamp
@@ -1370,7 +1370,7 @@ def test_persistent_volume_claim_retention_policy(self):
         }
         k8s.update_config(patch_scaled_policy_retain)
         self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-
+
         # decrease the number of instances
         k8s.api.custom_objects_api.patch_namespaced_custom_object(
             'acid.zalan.do', 'v1', 'default', 'postgresqls', 'acid-minimal-cluster', pg_patch_scale_down_instances)
@@ -1647,7 +1647,6 @@ def test_node_readiness_label(self):
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(master_nodes)
 
-
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_overwrite_pooler_deployment(self):
         pooler_name = 'acid-minimal-cluster-pooler'
@@ -1796,7 +1795,7 @@ def test_password_rotation(self):
             },
         }
         k8s.api.core_v1.patch_namespaced_secret(
-            name="foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do",
+            name="foo-user.acid-minimal-cluster.credentials.postgresql.acid.zalan.do",
             namespace="default",
             body=secret_fake_rotation)
 
@@ -1812,7 +1811,7 @@ def test_password_rotation(self):
             "data": {
                 "enable_password_rotation": "true",
                 "password_rotation_interval": "30",
-                "password_rotation_user_retention": "30",  # should be set to 60
+                "password_rotation_user_retention": "30",  # should be set to 60
             },
         }
         k8s.update_config(enable_password_rotation)
@@ -1865,7 +1864,7 @@ def test_password_rotation(self):
             "Unexpected username in secret of test.db_user: expected {}, got {}".format("test.db_user", secret_username))
 
         # disable password rotation for all other users (foo_user)
-        # and pick smaller intervals to see if the third fake rotation user is dropped
+        # and pick smaller intervals to see if the third fake rotation user is dropped
         enable_password_rotation = {
             "data": {
                 "enable_password_rotation": "false",
@@ -2363,6 +2362,56 @@ def test_taint_based_eviction(self):
         # toggle pod anti affinity to move replica away from master node
         self.assert_distributed_pods(master_nodes)
 
+    @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
+    def test_topology_spread_constraints(self):
+        '''
+        Enable topologySpreadConstraints for pods
+        '''
+        k8s = self.k8s
+        cluster_labels = "application=spilo,cluster-name=acid-minimal-cluster"
+
+        # Verify we are in good state from potential previous tests
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 2, "No 2 pods running")
+
+        master_nodes, replica_nodes = k8s.get_cluster_nodes()
+        self.assertNotEqual(master_nodes, [])
+        self.assertNotEqual(replica_nodes, [])
+
+        # Patch label to nodes for topologySpreadConstraints
+        patch_node_label = {
+            "metadata": {
+                "labels": {
+                    "topology.kubernetes.io/zone": "zalando"
+                }
+            }
+        }
+        k8s.api.core_v1.patch_node(master_nodes[0], patch_node_label)
+        k8s.api.core_v1.patch_node(replica_nodes[0], patch_node_label)
+
+        # Scale out postgresql pods
+        k8s.api.custom_objects_api.patch_namespaced_custom_object("acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster",
+            {"spec": {"numberOfInstances": 6}})
+        self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
+        self.eventuallyEqual(lambda: k8s.count_pods_with_label(cluster_labels), 6, "Postgresql StatefulSet did not scale to 6 pods")
+        self.eventuallyEqual(lambda: k8s.count_running_pods(), 6, "Not all pods are running")
+
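+        # count how many of the pods landed on each of the two worker nodes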
+        worker_node_1 = 0
+        worker_node_2 = 0
+        pods = k8s.api.core_v1.list_namespaced_pod('default', label_selector=cluster_labels)
+        for pod in pods.items:
+            if pod.spec.node_name == 'postgres-operator-e2e-tests-worker':
+                worker_node_1 += 1
+            elif pod.spec.node_name == 'postgres-operator-e2e-tests-worker2':
+                worker_node_2 += 1
+
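+        # the 6 pods are expected to be spread evenly across the two labeled nodes (3 each)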
+        self.assertEqual(worker_node_1, worker_node_2)
+        self.assertEqual(worker_node_1, 3)
+        self.assertEqual(worker_node_2, 3)
+
+        # Scale postgresql pods back in to the previous number of replicas
+        k8s.api.custom_objects_api.patch_namespaced_custom_object("acid.zalan.do", "v1", "default", "postgresqls", "acid-minimal-cluster",
+            {"spec": {"numberOfInstances": 2}})
+
     @timeout_decorator.timeout(TEST_TIMEOUT_SEC)
     def test_zz_cluster_deletion(self):
         '''
@@ -2438,7 +2487,7 @@ def test_zz_cluster_deletion(self):
             self.eventuallyEqual(lambda: k8s.count_deployments_with_label(cluster_label), 0, "Deployments not deleted")
             self.eventuallyEqual(lambda: k8s.count_pdbs_with_label(cluster_label), 0, "Pod disruption budget not deleted")
             self.eventuallyEqual(lambda: k8s.count_secrets_with_label(cluster_label), 8, "Secrets were deleted although disabled in config")
-            self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 3, "PVCs were deleted although disabled in config")
+            self.eventuallyEqual(lambda: k8s.count_pvcs_with_label(cluster_label), 6, "PVCs were deleted although disabled in config")
 
         except timeout_decorator.TimeoutError:
             print('Operator log: {}'.format(k8s.get_operator_log()))
@@ -2480,7 +2529,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
 
             # if nodes are different we can quit here
             if master_nodes[0] not in replica_nodes:
-                return True
+                return True
 
             # enable pod anti affinity in config map which should trigger movement of replica
             patch_enable_antiaffinity = {
@@ -2504,7 +2553,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
             }
             k8s.update_config(patch_disable_antiaffinity, "disable antiaffinity")
             self.eventuallyEqual(lambda: k8s.get_operator_state(), {"0": "idle"}, "Operator does not get in sync")
-
+
             k8s.wait_for_pod_start('spilo-role=replica,' + cluster_labels)
             k8s.wait_for_running_pods(cluster_labels, 2)
 
@@ -2515,7 +2564,7 @@ def assert_distributed_pods(self, target_nodes, cluster_labels='cluster-name=aci
             # if nodes are different we can quit here
             for target_node in target_nodes:
                 if (target_node not in master_nodes or target_node not in replica_nodes) and master_nodes[0] in replica_nodes:
-                    print('Pods run on the same node')
+                    print('Pods run on the same node')
                     return False
 
         except timeout_decorator.TimeoutError: