From 3882588f63f6daf3902719128b9004004b2bba8f Mon Sep 17 00:00:00 2001
From: xrstf
Date: Thu, 5 May 2022 10:52:47 +0200
Subject: [PATCH] fix init container handling, allow dumping logs for completed pods (add --live)

---
 main.go                |  4 +++-
 pkg/watcher/watcher.go | 27 +++++++++++++++++++--------
 2 files changed, 22 insertions(+), 9 deletions(-)

diff --git a/main.go b/main.go
index b375e41..d5d632f 100644
--- a/main.go
+++ b/main.go
@@ -25,6 +25,7 @@ type options struct {
 	namespaces     []string
 	containerNames []string
 	labels         string
+	live           bool
 	flatFiles      bool
 	verbose        bool
 }
@@ -39,6 +40,7 @@ func main() {
 	pflag.StringVarP(&opt.labels, "labels", "l", opt.labels, "Label-selector as an alternative to specifying resource names")
 	pflag.StringVarP(&opt.directory, "output", "o", opt.directory, "Directory where logs should be stored")
 	pflag.BoolVarP(&opt.flatFiles, "flat", "f", opt.flatFiles, "Do not create directory per namespace, but put all logs in the same directory")
+	pflag.BoolVar(&opt.live, "live", opt.live, "Only consider running pods, ignore completed/failed pods")
 	pflag.BoolVarP(&opt.verbose, "verbose", "v", opt.verbose, "Enable more verbose output")
 	pflag.Parse()
 
@@ -127,6 +129,6 @@ func main() {
 		log.Fatalf("Failed to create watch for pods: %v", err)
 	}
 
-	w := watcher.NewWatcher(clientset, c, log, opt.namespaces, args, opt.containerNames)
+	w := watcher.NewWatcher(clientset, c, log, opt.namespaces, args, opt.containerNames, opt.live)
 	w.Watch(rootCtx, wi)
 }
diff --git a/pkg/watcher/watcher.go b/pkg/watcher/watcher.go
index d7e478d..5f47d02 100644
--- a/pkg/watcher/watcher.go
+++ b/pkg/watcher/watcher.go
@@ -25,9 +25,10 @@ type Watcher struct {
 	resourceNames  []string
 	containerNames []string
 	seenContainers sets.String
+	runningOnly    bool
 }
 
-func NewWatcher(clientset *kubernetes.Clientset, c collector.Collector, log logrus.FieldLogger, namespaces, resourceNames, containerNames []string) *Watcher {
+func NewWatcher(clientset *kubernetes.Clientset, c collector.Collector, log logrus.FieldLogger, namespaces, resourceNames, containerNames []string, runningOnly bool) *Watcher {
 	return &Watcher{
 		clientset:      clientset,
 		log:            log,
@@ -36,6 +37,7 @@ func NewWatcher(clientset *kubernetes.Clientset, c collector.Collector, log logr
 		resourceNames:  resourceNames,
 		containerNames: containerNames,
 		seenContainers: sets.NewString(),
+		runningOnly:    runningOnly,
 	}
 }
 
@@ -59,10 +61,14 @@ func (w *Watcher) Watch(ctx context.Context, wi watch.Interface) {
 }
 
 func (w *Watcher) startLogCollectors(ctx context.Context, pod *corev1.Pod) {
-	allContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
+	w.startLogCollectorsForContainers(ctx, pod, pod.Spec.InitContainers, pod.Status.InitContainerStatuses)
+	w.startLogCollectorsForContainers(ctx, pod, pod.Spec.Containers, pod.Status.ContainerStatuses)
+}
+
+func (w *Watcher) startLogCollectorsForContainers(ctx context.Context, pod *corev1.Pod, containers []corev1.Container, statuses []corev1.ContainerStatus) {
 	podLog := w.getPodLog(pod)
 
-	for _, container := range allContainers {
+	for _, container := range containers {
 		containerName := container.Name
 		containerLog := podLog.WithField("container", containerName)
 
@@ -72,9 +78,9 @@ func (w *Watcher) startLogCollectors(ctx context.Context, pod *corev1.Pod) {
 		}
 
 		var status *corev1.ContainerStatus
-		for i, s := range pod.Status.ContainerStatuses {
+		for i, s := range statuses {
 			if s.Name == containerName {
-				status = &pod.Status.ContainerStatuses[i]
+				status = &statuses[i]
 				break
 			}
 		}
@@ -85,9 +91,14 @@ func (w *Watcher) startLogCollectors(ctx context.Context, pod *corev1.Pod) {
 			continue
 		}
 
-		// container is not running
-		if status.State.Running == nil {
-			containerLog.Debug("Container is not running.")
+		// container status is not what we want
+		if w.runningOnly {
+			if status.State.Running == nil {
+				containerLog.Debug("Container is not running.")
+				continue
+			}
+		} else if status.State.Running == nil && status.State.Terminated == nil {
+			containerLog.Debug("Container is still waiting.")
 			continue
 		}
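
Note (not part of the patch): the state filter added in the last hunk boils down to the
predicate sketched below. The shouldCollectLogs helper is illustrative only and does not
exist in the repository; it assumes the same k8s.io/api/core/v1 types that watcher.go
already imports, and "runningOnly" corresponds to the new --live flag.

package watcher

import corev1 "k8s.io/api/core/v1"

// shouldCollectLogs mirrors the check above: with runningOnly (--live) set,
// only running containers qualify; without it, running and terminated
// containers both qualify, and only still-waiting containers are skipped.
func shouldCollectLogs(status *corev1.ContainerStatus, runningOnly bool) bool {
	if status == nil {
		// no status reported for this container yet, nothing to collect
		return false
	}
	if runningOnly {
		return status.State.Running != nil
	}
	return status.State.Running != nil || status.State.Terminated != nil
}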