This repository has been archived by the owner on Apr 12, 2022. It is now read-only.

Merge pull request #167 from shivramsrivastava/max_pods_e2e
Adding E2E test case for max-pods
k8s-ci-robot authored Jan 28, 2019
2 parents e720acf + ea7e47a commit f2b3be5
Showing 3 changed files with 172 additions and 4 deletions.
2 changes: 1 addition & 1 deletion pkg/k8sclient/events.go
@@ -115,7 +115,7 @@ func (posiedonEvents *PoseidonEvents) ProcessFailureEvents(unscheduledTasks []ui
if poseidonToK8sPod, ok := PodToK8sPod[podIdentifier]; ok {
ProcessedPodEvents[podIdentifier] = poseidonToK8sPod
// send the failure event and update the pods status
- posiedonEvents.podEvents.Recorder.Eventf(poseidonToK8sPod, corev1.EventTypeWarning, "FailedScheduling", "Firmament failed to schedule the pod %s in %s namespace", podIdentifier.Namespace, podIdentifier.Name)
+ posiedonEvents.podEvents.Recorder.Eventf(poseidonToK8sPod, corev1.EventTypeWarning, "FailedScheduling", "Firmament failed to schedule the pod %s in %s namespace", podIdentifier.Name, podIdentifier.Namespace)
Update(posiedonEvents.k8sClient, poseidonToK8sPod, &corev1.PodCondition{
Type: corev1.PodScheduled,
Status: corev1.ConditionFalse,
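The one-line change above fixes a swapped-argument bug: the format string reads "pod %s in %s namespace", but the call passed the namespace first and the pod name second. A minimal standalone sketch of the bug and the fix (the identifiers and values here are illustrative, not from the repository):

package main

import "fmt"

func main() {
	name, namespace := "web-0", "default"

	// Before the fix: arguments in (namespace, name) order, so the event
	// message reads "... the pod default in web-0 namespace".
	fmt.Printf("Firmament failed to schedule the pod %s in %s namespace\n", namespace, name)

	// After the fix: arguments match the format verbs, producing
	// "... the pod web-0 in default namespace".
	fmt.Printf("Firmament failed to schedule the pod %s in %s namespace\n", name, namespace)
}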
78 changes: 78 additions & 0 deletions test/e2e/framework/util.go
@@ -539,3 +539,81 @@ func VerifyThatTaintIsGone(c clientset.Interface, nodeName string, taint *v1.Tai
}
Logf("%v taint completely removed from the node %v", taint, nodeName)
}

// GetPodsScheduled splits the given pod list into pods that are currently scheduled and pods that are not.
func GetPodsScheduled(pods *v1.PodList) (scheduledPods, notScheduledPods []v1.Pod) {
for _, pod := range pods.Items {
if !IsMasterNode(pod.Spec.NodeName) {
if pod.Spec.NodeName != "" {
_, scheduledCondition := GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionTrue))
scheduledPods = append(scheduledPods, pod)
} else {
_, scheduledCondition := GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(scheduledCondition != nil).To(Equal(true))
Expect(scheduledCondition.Status).To(Equal(v1.ConditionFalse))
if scheduledCondition.Reason == "Unschedulable" {
notScheduledPods = append(notScheduledPods, pod)
}
}
}
}
return
}
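For illustration, here is the classification rule above stripped of the master-node filter and the Gomega assertions: a hedged sketch assuming only k8s.io/api/core/v1 is available, with made-up pod values.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// classify mirrors GetPodsScheduled: a pod bound to a node with
// PodScheduled=True counts as scheduled; an unbound pod with
// PodScheduled=False and reason "Unschedulable" counts as not scheduled.
func classify(pods []v1.Pod) (scheduled, notScheduled []v1.Pod) {
	for _, pod := range pods {
		for i := range pod.Status.Conditions {
			cond := pod.Status.Conditions[i]
			if cond.Type != v1.PodScheduled {
				continue
			}
			if pod.Spec.NodeName != "" && cond.Status == v1.ConditionTrue {
				scheduled = append(scheduled, pod)
			} else if cond.Status == v1.ConditionFalse && cond.Reason == "Unschedulable" {
				notScheduled = append(notScheduled, pod)
			}
		}
	}
	return
}

func main() {
	pods := []v1.Pod{
		{Spec: v1.PodSpec{NodeName: "node-1"}, Status: v1.PodStatus{Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionTrue}}}},
		{Status: v1.PodStatus{Conditions: []v1.PodCondition{{Type: v1.PodScheduled, Status: v1.ConditionFalse, Reason: "Unschedulable"}}}},
	}
	s, ns := classify(pods)
	fmt.Printf("scheduled=%d notScheduled=%d\n", len(s), len(ns)) // scheduled=1 notScheduled=1
}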

// WaitForStableCluster waits until all existing pods are scheduled and returns their number.
func WaitForStableCluster(c clientset.Interface) int {
timeout := 10 * time.Minute
startTime := time.Now()

allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
// API server returns also Pods that succeeded. We need to filter them out.
currentPods := make([]v1.Pod, 0, len(allPods.Items))
for _, pod := range allPods.Items {
if pod.Status.Phase != v1.PodSucceeded && pod.Status.Phase != v1.PodFailed {
currentPods = append(currentPods, pod)
}
}
allPods.Items = currentPods
scheduledPods, currentlyNotScheduledPods := GetPodsScheduled(allPods)
for len(currentlyNotScheduledPods) != 0 {
time.Sleep(2 * time.Second)

allPods, err := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
ExpectNoError(err)
scheduledPods, currentlyNotScheduledPods = GetPodsScheduled(allPods)

if startTime.Add(timeout).Before(time.Now()) {
Failf("Timed out after %v waiting for stable cluster.", timeout)
break
}
}
return len(scheduledPods)
}
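The loop above is the usual poll-until-done pattern, with a 2-second interval and a 10-minute cap. As a hedged alternative sketch (not part of this change), the same behavior could be expressed with the upstream wait helper, assuming k8s.io/apimachinery/pkg/util/wait is available to the framework package:

package framework

import (
	"time"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	clientset "k8s.io/client-go/kubernetes"
)

// waitForStableClusterPoll is an illustrative rewrite of WaitForStableCluster:
// poll every 2 seconds until no pod is left unscheduled, giving up after
// 10 minutes and returning the error instead of calling Failf.
func waitForStableClusterPoll(c clientset.Interface) (int, error) {
	var scheduled []v1.Pod
	err := wait.PollImmediate(2*time.Second, 10*time.Minute, func() (bool, error) {
		allPods, listErr := c.CoreV1().Pods(metav1.NamespaceAll).List(metav1.ListOptions{})
		if listErr != nil {
			return false, listErr
		}
		var notScheduled []v1.Pod
		scheduled, notScheduled = GetPodsScheduled(allPods)
		return len(notScheduled) == 0, nil
	})
	return len(scheduled), err
}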

// GetPodCondition extracts the provided condition from the given status.
// Returns the index of the located condition and the condition itself, or -1 and nil if the condition is not present.
func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
if status == nil {
return -1, nil
}
return GetPodConditionFromList(status.Conditions, conditionType)
}

// GetPodConditionFromList extracts the provided condition from the given list of conditions and
// returns the index of the condition and the condition itself. Returns -1 and nil if the condition is not present.
func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
if conditions == nil {
return -1, nil
}
for i := range conditions {
if conditions[i].Type == conditionType {
return i, &conditions[i]
}
}
return -1, nil
}
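A short usage sketch for the two lookup helpers, written as if it sat in the same framework package (the status values are illustrative):

// examplePodConditionLookup shows GetPodCondition delegating to
// GetPodConditionFromList: the index and a pointer into the conditions slice
// come back, or -1 and nil when the condition type is absent.
func examplePodConditionLookup() {
	status := v1.PodStatus{
		Conditions: []v1.PodCondition{
			{Type: v1.PodReady, Status: v1.ConditionFalse},
			{Type: v1.PodScheduled, Status: v1.ConditionTrue},
		},
	}
	if idx, cond := GetPodCondition(&status, v1.PodScheduled); cond != nil {
		Logf("PodScheduled found at index %d with status %v", idx, cond.Status) // index 1, status True
	}
}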
96 changes: 93 additions & 3 deletions test/e2e/poseidon_integration.go
@@ -18,9 +18,6 @@ package test

import (
"fmt"
"math/rand"
"os"

"github.com/golang/glog"
"github.com/kubernetes-sigs/poseidon/test/e2e/framework"
. "github.com/onsi/ginkgo"
@@ -32,6 +29,8 @@ import (
"k8s.io/apimachinery/pkg/api/resource"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"math/rand"
"os"
"time"
)

@@ -2049,6 +2048,97 @@ var _ = Describe("Poseidon", func() {
})

})

Describe("Poseidon [Max-Pods Test]", func() {
// Tests whether the kubelet flag max-pods works.
// Currently the test uses the node's default max-pods value, i.e., we don't set the kubelet flag max-pods manually,
// and we only test on one of the nodes.
It("validates MaxPods limit number of pods that are allowed to run [Slow]", func() {
var totalPodCapacity int64
var podsNeededForSaturation int32
totalPodCapacity = 0
schedulableNodes := framework.ListSchedulableNodes(clientset)
for _, node := range schedulableNodes {
podCapacity, found := node.Status.Capacity[v1.ResourcePods]
Expect(found).To(Equal(true))
totalPodCapacity += podCapacity.Value()
}
currentlyScheduledPods := framework.WaitForStableCluster(clientset)
podsNeededForSaturation = int32(totalPodCapacity) - int32(currentlyScheduledPods)
name := "max-pods"
if podsNeededForSaturation > 0 {
deployment, err := clientset.ExtensionsV1beta1().Deployments(f.Namespace.Name).Create(&v1beta1.Deployment{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"app": "nginx"},
Name: name,
},
Spec: v1beta1.DeploymentSpec{
Selector: &metav1.LabelSelector{
MatchLabels: map[string]string{"app": "nginx", "name": "test-max-pods"},
},
Replicas: &podsNeededForSaturation,
Template: v1.PodTemplateSpec{
ObjectMeta: metav1.ObjectMeta{
Labels: map[string]string{"name": "test-max-pods", "app": "nginx", "schedulerName": "poseidon"},
Name: name,
},
Spec: v1.PodSpec{
SchedulerName: "poseidon",
Containers: []v1.Container{
{
Name: fmt.Sprintf("container-%s", name),
Image: "nginx:latest",
ImagePullPolicy: "IfNotPresent",
},
},
},
},
},
})
Expect(err).NotTo(HaveOccurred())

By("Waiting for the Deployment to have running status")
err = f.WaitForDeploymentComplete(deployment)
Expect(err).NotTo(HaveOccurred())
deployment, err = clientset.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())

By(fmt.Sprintf("Creation of deployment %q in namespace %q succeeded.", deployment.Name, ns))
Expect(deployment.Status.Replicas).To(Equal(deployment.Status.AvailableReplicas))

defer func() {
By("Pod was in Running state... Time to delete the deployment now...")
err = f.WaitForDeploymentDelete(deployment)
Expect(err).NotTo(HaveOccurred())
By("Waiting 5 seconds")
By("Check for deployment deletion")
_, err = clientset.ExtensionsV1beta1().Deployments(ns).Get(name, metav1.GetOptions{})
if err != nil {
Expect(errors.IsNotFound(err)).To(Equal(true))
}
}()

}
podName := "additional-pod"
// Create a K8s Pod with poseidon
f.WaitForSchedulerAfterAction(func() error {
_, err := clientset.CoreV1().Pods(ns).Create(&v1.Pod{
ObjectMeta: metav1.ObjectMeta{
Name: podName,
Labels: map[string]string{"name": "additional"},
},
Spec: v1.PodSpec{
SchedulerName: "poseidon",
Containers: []v1.Container{{
Name: fmt.Sprintf("container-%s", podName),
Image: "nginx:latest",
ImagePullPolicy: "IfNotPresent",
}}},
})
return err
}, f.Namespace.Name, podName, false)
})
})
})

func getNodeThatCanRunPodWithoutToleration(f *framework.Framework) string {
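Two notes on the test above. First, the saturation count is simply the summed pods capacity of all schedulable nodes minus the pods already scheduled, so the deployment fills every remaining pod slot in the cluster. Second, the final WaitForSchedulerAfterAction call passes false as its last argument, presumably flagging that the extra "additional-pod" is expected to stay pending once the cluster is saturated. A hedged sketch of an explicit follow-up check one could add inside the It block, using only helpers that already appear in this diff (not part of the change itself):

// Sketch: with every node at its pods capacity, the additional pod should
// carry a PodScheduled=False condition, typically with reason "Unschedulable".
pod, err := clientset.CoreV1().Pods(ns).Get(podName, metav1.GetOptions{})
Expect(err).NotTo(HaveOccurred())
_, cond := framework.GetPodCondition(&pod.Status, v1.PodScheduled)
Expect(cond).NotTo(BeNil())
Expect(cond.Status).To(Equal(v1.ConditionFalse))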
