fix init container handling, allow to dump logs for completed pods (add --live)
xrstf committed May 5, 2022
1 parent 90fb4f1 commit 3882588
Showing 2 changed files with 22 additions and 9 deletions.
4 changes: 3 additions & 1 deletion main.go
@@ -25,6 +25,7 @@ type options struct {
namespaces []string
containerNames []string
labels string
live bool
flatFiles bool
verbose bool
}
@@ -39,6 +40,7 @@ func main() {
pflag.StringVarP(&opt.labels, "labels", "l", opt.labels, "Label-selector as an alternative to specifying resource names")
pflag.StringVarP(&opt.directory, "output", "o", opt.directory, "Directory where logs should be stored")
pflag.BoolVarP(&opt.flatFiles, "flat", "f", opt.flatFiles, "Do not create directory per namespace, but put all logs in the same directory")
pflag.BoolVar(&opt.live, "live", opt.live, "Only consider running pods, ignore completed/failed pods")
pflag.BoolVarP(&opt.verbose, "verbose", "v", opt.verbose, "Enable more verbose output")
pflag.Parse()

@@ -127,6 +129,6 @@ func main() {
log.Fatalf("Failed to create watch for pods: %v", err)
}

w := watcher.NewWatcher(clientset, c, log, opt.namespaces, args, opt.containerNames)
w := watcher.NewWatcher(clientset, c, log, opt.namespaces, args, opt.containerNames, opt.live)
w.Watch(rootCtx, wi)
}
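The new --live option is wired up with spf13/pflag like the existing flags. As a minimal, self-contained sketch of that pattern (only the flag name, default, and help text come from the commit; the surrounding program is illustrative):

package main

import (
	"fmt"

	"github.com/spf13/pflag"
)

func main() {
	// Defaults to false, so completed/failed pods are still dumped
	// unless --live is passed explicitly.
	live := false
	pflag.BoolVar(&live, "live", live, "Only consider running pods, ignore completed/failed pods")
	pflag.Parse()

	fmt.Printf("only running pods: %v\n", live)
}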
27 changes: 19 additions & 8 deletions pkg/watcher/watcher.go
@@ -25,9 +25,10 @@ type Watcher struct {
resourceNames []string
containerNames []string
seenContainers sets.String
runningOnly bool
}

func NewWatcher(clientset *kubernetes.Clientset, c collector.Collector, log logrus.FieldLogger, namespaces, resourceNames, containerNames []string) *Watcher {
func NewWatcher(clientset *kubernetes.Clientset, c collector.Collector, log logrus.FieldLogger, namespaces, resourceNames, containerNames []string, runningOnly bool) *Watcher {
return &Watcher{
clientset: clientset,
log: log,
@@ -36,6 +37,7 @@ func NewWatcher(clientset *kubernetes.Clientset, c collector.Collector, log logr
resourceNames: resourceNames,
containerNames: containerNames,
seenContainers: sets.NewString(),
runningOnly: runningOnly,
}
}

@@ -59,10 +61,14 @@ func (w *Watcher) Watch(ctx context.Context, wi watch.Interface) {
}

func (w *Watcher) startLogCollectors(ctx context.Context, pod *corev1.Pod) {
allContainers := append(pod.Spec.InitContainers, pod.Spec.Containers...)
w.startLogCollectorsForContainers(ctx, pod, pod.Spec.InitContainers, pod.Status.InitContainerStatuses)
w.startLogCollectorsForContainers(ctx, pod, pod.Spec.Containers, pod.Status.ContainerStatuses)
}

func (w *Watcher) startLogCollectorsForContainers(ctx context.Context, pod *corev1.Pod, containers []corev1.Container, statuses []corev1.ContainerStatus) {
podLog := w.getPodLog(pod)

for _, container := range allContainers {
for _, container := range containers {
containerName := container.Name
containerLog := podLog.WithField("container", containerName)

@@ -72,9 +78,9 @@ func (w *Watcher) startLogCollectors(ctx context.Context, pod *corev1.Pod) {
}

var status *corev1.ContainerStatus
for i, s := range pod.Status.ContainerStatuses {
for i, s := range statuses {
if s.Name == containerName {
status = &pod.Status.ContainerStatuses[i]
status = &statuses[i]
break
}
}
@@ -85,9 +91,14 @@ func (w *Watcher) startLogCollectors(ctx context.Context, pod *corev1.Pod) {
continue
}

// container is not running
if status.State.Running == nil {
containerLog.Debug("Container is not running.")
// container status is not what we want
if w.runningOnly {
if status.State.Running == nil {
containerLog.Debug("Container is not running.")
continue
}
} else if status.State.Running == nil && status.State.Terminated == nil {
containerLog.Debug("Container is still waiting.")
continue
}

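Taken together, the watcher.go changes fix two things: init containers are now matched against pod.Status.InitContainerStatuses instead of pod.Status.ContainerStatuses (where init container status is not reported, so it was never found before), and unless --live is set, containers that have already terminated are still considered, so logs of completed or failed pods can be dumped. A rough sketch of the resulting selection logic, using a hypothetical shouldCollect helper that is not part of the commit:

package example

import corev1 "k8s.io/api/core/v1"

// shouldCollect is a hypothetical distillation of the status checks in
// startLogCollectorsForContainers: it reports whether logs should be
// collected for a container in the given state.
func shouldCollect(status *corev1.ContainerStatus, runningOnly bool) bool {
	if status == nil {
		// no status recorded for this container yet
		return false
	}

	if runningOnly {
		// --live: only containers that are currently running
		return status.State.Running != nil
	}

	// default: running containers plus containers that already terminated
	// (completed or failed); containers that are still waiting are skipped
	return status.State.Running != nil || status.State.Terminated != nil
}

// collectable mirrors how the commit walks init containers and regular
// containers with their own, separate status slices.
func collectable(pod *corev1.Pod, runningOnly bool) []string {
	var names []string

	walk := func(containers []corev1.Container, statuses []corev1.ContainerStatus) {
		for _, c := range containers {
			var status *corev1.ContainerStatus
			for i := range statuses {
				if statuses[i].Name == c.Name {
					status = &statuses[i]
					break
				}
			}

			if shouldCollect(status, runningOnly) {
				names = append(names, c.Name)
			}
		}
	}

	walk(pod.Spec.InitContainers, pod.Status.InitContainerStatuses)
	walk(pod.Spec.Containers, pod.Status.ContainerStatuses)

	return names
}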
