From a95a77c89e778200f3e5089f2afba86d77a392eb Mon Sep 17 00:00:00 2001 From: Pavel Tishkov Date: Thu, 4 Dec 2025 20:47:47 +0300 Subject: [PATCH 1/4] feat(cli): add debug bundle Signed-off-by: Pavel Tishkov --- src/cli/internal/clientconfig/clientconfig.go | 13 + .../internal/cmd/debugbundle/collectors.go | 462 ++++++++++++++++++ .../internal/cmd/debugbundle/debugbundle.go | 141 ++++++ src/cli/pkg/command/virtualization.go | 2 + 4 files changed, 618 insertions(+) create mode 100644 src/cli/internal/cmd/debugbundle/collectors.go create mode 100644 src/cli/internal/cmd/debugbundle/debugbundle.go diff --git a/src/cli/internal/clientconfig/clientconfig.go b/src/cli/internal/clientconfig/clientconfig.go index 1b7c9f3d99..2b502366ca 100644 --- a/src/cli/internal/clientconfig/clientconfig.go +++ b/src/cli/internal/clientconfig/clientconfig.go @@ -20,6 +20,7 @@ import ( "context" "fmt" + "k8s.io/client-go/rest" "k8s.io/client-go/tools/clientcmd" "github.com/deckhouse/virtualization/api/client/kubeclient" @@ -49,3 +50,15 @@ func ClientAndNamespaceFromContext(ctx context.Context) (client kubeclient.Clien } return client, namespace, overridden, nil } + +func GetRESTConfig(ctx context.Context) (*rest.Config, error) { + clientConfig, ok := ctx.Value(clientConfigKey).(clientcmd.ClientConfig) + if !ok { + return nil, fmt.Errorf("unable to get client config from context") + } + config, err := clientConfig.ClientConfig() + if err != nil { + return nil, err + } + return config, nil +} diff --git a/src/cli/internal/cmd/debugbundle/collectors.go b/src/cli/internal/cmd/debugbundle/collectors.go new file mode 100644 index 0000000000..4f57c5bf33 --- /dev/null +++ b/src/cli/internal/cmd/debugbundle/collectors.go @@ -0,0 +1,462 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package debugbundle + +import ( + "context" + "encoding/json" + "fmt" + "io" + "strings" + + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/apis/meta/v1/unstructured" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/runtime/schema" + "sigs.k8s.io/yaml" + + "github.com/deckhouse/virtualization/api/client/kubeclient" + "github.com/deckhouse/virtualization/api/core/v1alpha2" +) + +const ( + internalAPIGroup = "internal.virtualization.deckhouse.io" + internalAPIVersion = "v1" + systemNamespace = "d8-virtualization" + coreAPIVersion = "v1" +) + +func (b *DebugBundle) collectVMResources(ctx context.Context, client kubeclient.Client, namespace, vmName string) error { + // Get VM + vm, err := client.VirtualMachines(namespace).Get(ctx, vmName, metav1.GetOptions{}) + if err != nil { + if b.handleError("VirtualMachine", vmName, err) { + return nil + } + return err + } + b.outputResource("VirtualMachine", vmName, namespace, vm) + + // Get IVVM + ivvm, err := b.getInternalResource(ctx, "internalvirtualizationvirtualmachines", namespace, vmName) + if err == nil { + b.outputResource("InternalVirtualizationVirtualMachine", vmName, namespace, ivvm) + } else if !b.handleError("InternalVirtualizationVirtualMachine", vmName, err) { + return err + } + + // Get IVVMI + ivvmi, err := b.getInternalResource(ctx, "internalvirtualizationvirtualmachineinstances", namespace, vmName) + if err == nil { + b.outputResource("InternalVirtualizationVirtualMachineInstance", vmName, namespace, ivvmi) + } else if !b.handleError("InternalVirtualizationVirtualMachineInstance", vmName, err) { + return err + } + + // Get VM operations + vmUID := string(vm.UID) + vmops, err := client.VirtualMachineOperations(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("virtualization.deckhouse.io/virtual-machine-uid=%s", vmUID), + }) + if err == nil { + for _, vmop := range vmops.Items { + b.outputResource("VirtualMachineOperation", vmop.Name, namespace, &vmop) + } + } else if !b.handleError("VirtualMachineOperation", "", err) { + return err + } + + // Get migrations + migrations, err := b.getInternalResourceList(ctx, "internalvirtualizationvirtualmachineinstancemigrations", namespace) + if err == nil { + for _, item := range migrations { + vmiName, found, _ := unstructured.NestedString(item.Object, "spec", "vmiName") + if found && vmiName == vmName { + name, _, _ := unstructured.NestedString(item.Object, "metadata", "name") + b.outputResource("InternalVirtualizationVirtualMachineInstanceMigration", name, namespace, item) + } + } + } else if !b.handleError("InternalVirtualizationVirtualMachineInstanceMigration", "", err) { + return err + } + + // Get events for VM + b.collectEvents(ctx, client, namespace, "VirtualMachine", vmName) + + return nil +} + +func (b *DebugBundle) collectBlockDevices(ctx context.Context, client kubeclient.Client, namespace, vmName string) error { + vm, err := client.VirtualMachines(namespace).Get(ctx, vmName, metav1.GetOptions{}) + if err != nil { + return err + } + + // Static block devices + for _, bdRef := range vm.Spec.BlockDeviceRefs { + if err := b.collectBlockDevice(ctx, client, namespace, bdRef.Kind, bdRef.Name); err != nil { + if !b.handleError(string(bdRef.Kind), bdRef.Name, err) { + return err + } + } + } + + // Hotplug block devices + for _, bdRef := range vm.Status.BlockDeviceRefs { + if bdRef.Hotplugged { + if err := b.collectBlockDevice(ctx, client, namespace, bdRef.Kind, bdRef.Name); err != nil { + if 
!b.handleError(string(bdRef.Kind), bdRef.Name, err) { + return err + } + } + + // Get VMBDA + if bdRef.VirtualMachineBlockDeviceAttachmentName != "" { + vmbda, err := client.VirtualMachineBlockDeviceAttachments(namespace).Get(ctx, bdRef.VirtualMachineBlockDeviceAttachmentName, metav1.GetOptions{}) + if err == nil { + b.outputResource("VirtualMachineBlockDeviceAttachment", vmbda.Name, namespace, vmbda) + b.collectEvents(ctx, client, namespace, "VirtualMachineBlockDeviceAttachment", vmbda.Name) + } else if !b.handleError("VirtualMachineBlockDeviceAttachment", bdRef.VirtualMachineBlockDeviceAttachmentName, err) { + return err + } + } + } + } + + // Get all VMBDA that reference this VM + vmbdas, err := client.VirtualMachineBlockDeviceAttachments(namespace).List(ctx, metav1.ListOptions{}) + if err == nil { + for _, vmbda := range vmbdas.Items { + if vmbda.Spec.VirtualMachineName == vmName { + b.outputResource("VirtualMachineBlockDeviceAttachment", vmbda.Name, namespace, &vmbda) + b.collectEvents(ctx, client, namespace, "VirtualMachineBlockDeviceAttachment", vmbda.Name) + + // Get associated block device + if vmbda.Spec.BlockDeviceRef.Kind != "" && vmbda.Spec.BlockDeviceRef.Name != "" { + // Convert VMBDAObjectRefKind to BlockDeviceKind + var bdKind v1alpha2.BlockDeviceKind + switch vmbda.Spec.BlockDeviceRef.Kind { + case v1alpha2.VMBDAObjectRefKindVirtualDisk: + bdKind = v1alpha2.VirtualDiskKind + case v1alpha2.VMBDAObjectRefKindVirtualImage: + bdKind = v1alpha2.VirtualImageKind + case v1alpha2.VMBDAObjectRefKindClusterVirtualImage: + bdKind = v1alpha2.ClusterVirtualImageKind + default: + continue + } + if err := b.collectBlockDevice(ctx, client, namespace, bdKind, vmbda.Spec.BlockDeviceRef.Name); err != nil { + if !b.handleError(string(bdKind), vmbda.Spec.BlockDeviceRef.Name, err) { + return err + } + } + } + } + } + } else if !b.handleError("VirtualMachineBlockDeviceAttachment", "", err) { + return err + } + + return nil +} + +func (b *DebugBundle) collectBlockDevice(ctx context.Context, client kubeclient.Client, namespace string, kind v1alpha2.BlockDeviceKind, name string) error { + switch kind { + case v1alpha2.VirtualDiskKind: + vd, err := client.VirtualDisks(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + b.outputResource("VirtualDisk", name, namespace, vd) + b.collectEvents(ctx, client, namespace, "VirtualDisk", name) + + // Get PVC + if vd.Status.Target.PersistentVolumeClaim != "" { + pvc, err := client.CoreV1().PersistentVolumeClaims(namespace).Get(ctx, vd.Status.Target.PersistentVolumeClaim, metav1.GetOptions{}) + if err == nil { + b.outputResource("PersistentVolumeClaim", pvc.Name, namespace, pvc) + b.collectEvents(ctx, client, namespace, "PersistentVolumeClaim", pvc.Name) + + // Get PV + if pvc.Spec.VolumeName != "" { + pv, err := client.CoreV1().PersistentVolumes().Get(ctx, pvc.Spec.VolumeName, metav1.GetOptions{}) + if err == nil { + b.outputResource("PersistentVolume", pv.Name, "", pv) + } else if !b.handleError("PersistentVolume", pvc.Spec.VolumeName, err) { + return err + } + } + } else if !b.handleError("PersistentVolumeClaim", vd.Status.Target.PersistentVolumeClaim, err) { + return err + } + } + + case v1alpha2.VirtualImageKind: + vi, err := client.VirtualImages(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + b.outputResource("VirtualImage", name, namespace, vi) + b.collectEvents(ctx, client, namespace, "VirtualImage", name) + + case v1alpha2.ClusterVirtualImageKind: + cvi, err := 
client.ClusterVirtualImages().Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return err + } + b.outputResource("ClusterVirtualImage", name, "", cvi) + // ClusterVirtualImage doesn't have events in namespace + + default: + return fmt.Errorf("unknown block device kind: %s", kind) + } + + return nil +} + +func (b *DebugBundle) collectPods(ctx context.Context, client kubeclient.Client, namespace, vmName string) error { + pods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{ + LabelSelector: fmt.Sprintf("vm.kubevirt.internal.virtualization.deckhouse.io/name=%s", vmName), + }) + if err != nil { + if b.handleError("Pod", "", err) { + return nil + } + return err + } + + for _, pod := range pods.Items { + b.outputResource("Pod", pod.Name, namespace, &pod) + b.collectEvents(ctx, client, namespace, "Pod", pod.Name) + + if b.saveLogs { + b.collectSinglePodLogs(ctx, client, namespace, pod.Name, false) + } + } + + return nil +} + +func (b *DebugBundle) collectSystemLogs(ctx context.Context, client kubeclient.Client) error { + pods, err := client.CoreV1().Pods(systemNamespace).List(ctx, metav1.ListOptions{}) + if err != nil { + if b.handleError("Pod", systemNamespace, err) { + return nil + } + return err + } + + for _, pod := range pods.Items { + b.collectSinglePodLogs(ctx, client, systemNamespace, pod.Name, true) + } + return nil +} + +func (b *DebugBundle) collectSinglePodLogs(ctx context.Context, client kubeclient.Client, namespace, podName string, isSystem bool) { + logPrefix := fmt.Sprintf("%s/%s", namespace, podName) + if isSystem { + logPrefix = fmt.Sprintf("system/%s/%s", namespace, podName) + } + + // Get current logs + req := client.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{}) + if logStream, err := req.Stream(ctx); err == nil { + if logContent, err := io.ReadAll(logStream); err == nil { + fmt.Fprintf(b.stderr, "\n# %s\n", logPrefix) + fmt.Fprintf(b.stderr, "%s\n", string(logContent)) + } + logStream.Close() + } + + // Get previous logs + req = client.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{Previous: true}) + if logStream, err := req.Stream(ctx); err == nil { + if logContent, err := io.ReadAll(logStream); err == nil { + fmt.Fprintf(b.stderr, "\n# %s (previous)\n", logPrefix) + fmt.Fprintf(b.stderr, "%s\n", string(logContent)) + } + logStream.Close() + } +} + +func (b *DebugBundle) collectEvents(ctx context.Context, client kubeclient.Client, namespace, resourceType, resourceName string) { + events, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("involvedObject.name=%s", resourceName), + }) + if err != nil { + if b.handleError("Event", resourceName, err) { + return + } + return + } + + // Add each event individually to preserve TypeMeta + for i := range events.Items { + b.outputResource("Event", fmt.Sprintf("%s-%s-%d", strings.ToLower(resourceType), resourceName, i), namespace, &events.Items[i]) + } +} + +func (b *DebugBundle) getInternalGVR(resource string) schema.GroupVersionResource { + return schema.GroupVersionResource{ + Group: internalAPIGroup, + Version: internalAPIVersion, + Resource: resource, + } +} + +func (b *DebugBundle) getInternalResource(ctx context.Context, resource, namespace, name string) (*unstructured.Unstructured, error) { + obj, err := b.dynamicClient.Resource(b.getInternalGVR(resource)).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + if err != nil { + return nil, err + } + return obj, nil +} + +func (b *DebugBundle) 
getInternalResourceList(ctx context.Context, resource, namespace string) ([]*unstructured.Unstructured, error) { + list, err := b.dynamicClient.Resource(b.getInternalGVR(resource)).Namespace(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + return nil, err + } + + result := make([]*unstructured.Unstructured, len(list.Items)) + for i := range list.Items { + result[i] = &list.Items[i] + } + return result, nil +} + +func (b *DebugBundle) outputResource(kind, name, namespace string, obj runtime.Object) error { + // Check if object is unstructured and prepare it + unstructuredObj, isUnstructured := obj.(*unstructured.Unstructured) + if isUnstructured { + // For unstructured objects, ensure kind is set if missing + // apiVersion should already be present from cluster, but will be handled below if missing + if unstructuredObj.GetKind() == "" { + unstructuredObj.SetKind(kind) + } + } else { + // For typed objects, ensure GVK is set + // TypeMeta is embedded in the struct, but we need to ensure GVK is set + // so that it's properly serialized + gvk := obj.GetObjectKind().GroupVersionKind() + if gvk.Kind == "" { + gvk.Kind = kind + } + // If Group or Version is empty, try to infer from the object type + // Objects from cluster should have GVK set, but if not, we'll add it during JSON processing + obj.GetObjectKind().SetGroupVersionKind(gvk) + } + + // Output separator if not first resource + if b.resourceCount > 0 { + fmt.Fprintf(b.stdout, "\n---\n") + } + b.resourceCount++ + + // Convert to JSON first to preserve all fields including TypeMeta (kind, apiVersion, spec, status, etc.) + // For typed objects, TypeMeta is embedded in the struct with json:",inline" tag, + // so json.Marshal should include it. However, if TypeMeta fields are empty, + // they might be omitted due to omitempty tags. We ensure GVK is set above. 
+ jsonBytes, err := json.Marshal(obj) + if err != nil { + return fmt.Errorf("failed to marshal %s/%s to JSON: %w", kind, name, err) + } + + // Always parse JSON and ensure apiVersion and kind are present + // This handles cases where TypeMeta might not be serialized properly + var jsonObj map[string]interface{} + if err := json.Unmarshal(jsonBytes, &jsonObj); err == nil { + needsUpdate := false + // If apiVersion is missing, try to get it from the object itself + if _, ok := jsonObj["apiVersion"]; !ok { + var apiVersion string + + // For unstructured objects, get apiVersion directly + if isUnstructured { + apiVersion = unstructuredObj.GetAPIVersion() + } else { + // For typed objects, get apiVersion from GVK + // Objects from cluster should have GVK set correctly + gvk := obj.GetObjectKind().GroupVersionKind() + + // If GVK is empty, try to get it from scheme + if gvk.Kind == "" || (gvk.Group == "" && gvk.Version == "") { + // Try to get GVK from scheme + if gvks, _, err := kubeclient.Scheme.ObjectKinds(obj); err == nil && len(gvks) > 0 { + gvk = gvks[0] + // Set GVK on the object for future use + obj.GetObjectKind().SetGroupVersionKind(gvk) + } + } + + // Use GroupVersion().String() which automatically formats as "group/version" or "version" + // This works for both custom resources (group/version) and core resources (version only) + apiVersion = gvk.GroupVersion().String() + } + + // If we got a valid apiVersion, use it + if apiVersion != "" && apiVersion != "/" { + jsonObj["apiVersion"] = apiVersion + needsUpdate = true + } else { + // Fallback: for core Kubernetes resources, use "v1" + // This handles cases where GVK is not set for core resources + coreKinds := map[string]bool{ + "Pod": true, + "PersistentVolumeClaim": true, + "PersistentVolume": true, + "Event": true, + "Service": true, + "ConfigMap": true, + "Secret": true, + } + if coreKinds[kind] { + jsonObj["apiVersion"] = coreAPIVersion + needsUpdate = true + } + } + } + // Ensure kind is also present + if _, ok := jsonObj["kind"]; !ok { + jsonObj["kind"] = kind + needsUpdate = true + } + // Re-marshal with apiVersion and kind if needed + if needsUpdate { + jsonBytes, err = json.Marshal(jsonObj) + if err != nil { + return fmt.Errorf("failed to re-marshal %s/%s to JSON: %w", kind, name, err) + } + } + } + + // Convert JSON to YAML - this preserves all fields + yamlBytes, err := yaml.JSONToYAML(jsonBytes) + if err != nil { + return fmt.Errorf("failed to convert %s/%s to YAML: %w", kind, name, err) + } + + // Output comment and full YAML resource + fmt.Fprintf(b.stdout, "# %s: %s", kind, name) + if namespace != "" { + fmt.Fprintf(b.stdout, " (namespace: %s)", namespace) + } + fmt.Fprintf(b.stdout, "\n%s", string(yamlBytes)) + + return nil +} diff --git a/src/cli/internal/cmd/debugbundle/debugbundle.go b/src/cli/internal/cmd/debugbundle/debugbundle.go new file mode 100644 index 0000000000..40cb167fb6 --- /dev/null +++ b/src/cli/internal/cmd/debugbundle/debugbundle.go @@ -0,0 +1,141 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and +limitations under the License. +*/ + +package debugbundle + +import ( + "context" + "fmt" + "io" + + "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/dynamic" + + "github.com/deckhouse/virtualization/api/client/kubeclient" + "github.com/deckhouse/virtualization/src/cli/internal/clientconfig" + "github.com/deckhouse/virtualization/src/cli/internal/templates" +) + +func NewCommand() *cobra.Command { + bundle := &DebugBundle{} + cmd := &cobra.Command{ + Use: "debug-bundle (VirtualMachine)", + Short: "Create a debug bundle with VM configuration, events, and logs.", + Example: usage(), + Args: templates.ExactArgs("debug-bundle", 1), + RunE: bundle.Run, + } + + cmd.Flags().BoolVar(&bundle.saveLogs, "save-logs", false, "Save pod logs to output") + cmd.Flags().BoolVar(&bundle.saveSystemLogs, "save-system-logs", false, "Save system component logs to output") + cmd.Flags().BoolVar(&bundle.debug, "debug", false, "Enable debug output for permission errors") + cmd.SetUsageTemplate(templates.UsageTemplate()) + return cmd +} + +type DebugBundle struct { + saveLogs bool + saveSystemLogs bool + debug bool + dynamicClient dynamic.Interface + stdout io.Writer + stderr io.Writer + resourceCount int +} + +func usage() string { + return ` # Create debug bundle for VirtualMachine 'myvm': + {{ProgramName}} debug-bundle myvm + {{ProgramName}} debug-bundle myvm.mynamespace + {{ProgramName}} debug-bundle myvm -n mynamespace + # Include pod logs: + {{ProgramName}} debug-bundle --save-logs myvm + # Include system component logs: + {{ProgramName}} debug-bundle --save-system-logs myvm` +} + +func (b *DebugBundle) Run(cmd *cobra.Command, args []string) error { + client, defaultNamespace, _, err := clientconfig.ClientAndNamespaceFromContext(cmd.Context()) + if err != nil { + return err + } + + namespace, name, err := templates.ParseTarget(args[0]) + if err != nil { + return err + } + if namespace == "" { + namespace = defaultNamespace + } + + // Get dynamic client for internal resources + config, err := clientconfig.GetRESTConfig(cmd.Context()) + if err != nil { + return fmt.Errorf("failed to get REST config: %w", err) + } + b.dynamicClient, err = dynamic.NewForConfig(config) + if err != nil { + return fmt.Errorf("failed to create dynamic client: %w", err) + } + + // Set output writers + b.stdout = cmd.OutOrStdout() + b.stderr = cmd.ErrOrStderr() + + // Collect and output resources immediately + if err := b.collectResources(cmd.Context(), client, namespace, name); err != nil { + return err + } + + return nil +} + +func (b *DebugBundle) collectResources(ctx context.Context, client kubeclient.Client, namespace, vmName string) error { + // Collect VM and core resources + if err := b.collectVMResources(ctx, client, namespace, vmName); err != nil { + return fmt.Errorf("failed to collect VM resources: %w", err) + } + + // Collect block devices + if err := b.collectBlockDevices(ctx, client, namespace, vmName); err != nil { + return fmt.Errorf("failed to collect block devices: %w", err) + } + + // Collect pods (and logs if requested) + if err := b.collectPods(ctx, client, namespace, vmName); err != nil { + return fmt.Errorf("failed to collect pods: %w", err) + } + + // Collect system logs if requested + if b.saveSystemLogs { + if err := b.collectSystemLogs(ctx, client); err != nil { + return fmt.Errorf("failed to collect system logs: %w", err) + } + } + + return nil +} + +func (b *DebugBundle) handleError(resourceType, 
resourceName string, err error) bool { + if errors.IsForbidden(err) || errors.IsUnauthorized(err) { + if b.debug { + fmt.Fprintf(b.stderr, "Warning: Skipping %s/%s: permission denied\n", resourceType, resourceName) + } + return true // Skip this resource + } + return false // Don't skip, propagate error +} diff --git a/src/cli/pkg/command/virtualization.go b/src/cli/pkg/command/virtualization.go index 403c0047c2..ac884a8f6f 100644 --- a/src/cli/pkg/command/virtualization.go +++ b/src/cli/pkg/command/virtualization.go @@ -32,6 +32,7 @@ import ( "github.com/deckhouse/virtualization/api/client/kubeclient" "github.com/deckhouse/virtualization/src/cli/internal/clientconfig" "github.com/deckhouse/virtualization/src/cli/internal/cmd/console" + "github.com/deckhouse/virtualization/src/cli/internal/cmd/debugbundle" "github.com/deckhouse/virtualization/src/cli/internal/cmd/lifecycle" "github.com/deckhouse/virtualization/src/cli/internal/cmd/portforward" "github.com/deckhouse/virtualization/src/cli/internal/cmd/scp" @@ -84,6 +85,7 @@ func NewCommand(programName string) *cobra.Command { virtCmd.AddCommand( console.NewCommand(), + debugbundle.NewCommand(), vnc.NewCommand(), portforward.NewCommand(), ssh.NewCommand(), From a564643316a73a91e9ad2879e4c00a58b5e099fb Mon Sep 17 00:00:00 2001 From: Pavel Tishkov Date: Thu, 4 Dec 2025 21:01:51 +0300 Subject: [PATCH 2/4] feat(cli): refactor Signed-off-by: Pavel Tishkov --- .../internal/cmd/debugbundle/collectors.go | 82 ++++++++++--------- 1 file changed, 45 insertions(+), 37 deletions(-) diff --git a/src/cli/internal/cmd/debugbundle/collectors.go b/src/cli/internal/cmd/debugbundle/collectors.go index 4f57c5bf33..431f1d536b 100644 --- a/src/cli/internal/cmd/debugbundle/collectors.go +++ b/src/cli/internal/cmd/debugbundle/collectors.go @@ -41,6 +41,16 @@ const ( coreAPIVersion = "v1" ) +var coreKinds = map[string]bool{ + "Pod": true, + "PersistentVolumeClaim": true, + "PersistentVolume": true, + "Event": true, + "Service": true, + "ConfigMap": true, + "Secret": true, +} + func (b *DebugBundle) collectVMResources(ctx context.Context, client kubeclient.Client, namespace, vmName string) error { // Get VM vm, err := client.VirtualMachines(namespace).Get(ctx, vmName, metav1.GetOptions{}) @@ -240,7 +250,10 @@ func (b *DebugBundle) collectPods(ctx context.Context, client kubeclient.Client, return err } + // Collect VM pods and their UIDs for finding dependent pods + vmPodUIDs := make(map[string]bool) for _, pod := range pods.Items { + vmPodUIDs[string(pod.UID)] = true b.outputResource("Pod", pod.Name, namespace, &pod) b.collectEvents(ctx, client, namespace, "Pod", pod.Name) @@ -249,6 +262,35 @@ func (b *DebugBundle) collectPods(ctx context.Context, client kubeclient.Client, } } + // Collect pods that have ownerReference to VM pods (e.g., hotplug volume pods) + if len(vmPodUIDs) > 0 { + allPods, err := client.CoreV1().Pods(namespace).List(ctx, metav1.ListOptions{}) + if err != nil { + // If we can't list all pods, continue without dependent pods + if !b.handleError("Pod", namespace, err) { + return err + } + } else { + for _, pod := range allPods.Items { + // Skip VM pods we already collected + if vmPodUIDs[string(pod.UID)] { + continue + } + // Check if this pod has ownerReference to any VM pod + for _, ownerRef := range pod.OwnerReferences { + if ownerRef.Kind == "Pod" && vmPodUIDs[string(ownerRef.UID)] { + b.outputResource("Pod", pod.Name, namespace, &pod) + b.collectEvents(ctx, client, namespace, "Pod", pod.Name) + if b.saveLogs { + b.collectSinglePodLogs(ctx, 
client, namespace, pod.Name, false) + } + break + } + } + } + } + } + return nil } @@ -341,26 +383,7 @@ func (b *DebugBundle) getInternalResourceList(ctx context.Context, resource, nam } func (b *DebugBundle) outputResource(kind, name, namespace string, obj runtime.Object) error { - // Check if object is unstructured and prepare it unstructuredObj, isUnstructured := obj.(*unstructured.Unstructured) - if isUnstructured { - // For unstructured objects, ensure kind is set if missing - // apiVersion should already be present from cluster, but will be handled below if missing - if unstructuredObj.GetKind() == "" { - unstructuredObj.SetKind(kind) - } - } else { - // For typed objects, ensure GVK is set - // TypeMeta is embedded in the struct, but we need to ensure GVK is set - // so that it's properly serialized - gvk := obj.GetObjectKind().GroupVersionKind() - if gvk.Kind == "" { - gvk.Kind = kind - } - // If Group or Version is empty, try to infer from the object type - // Objects from cluster should have GVK set, but if not, we'll add it during JSON processing - obj.GetObjectKind().SetGroupVersionKind(gvk) - } // Output separator if not first resource if b.resourceCount > 0 { @@ -369,9 +392,6 @@ func (b *DebugBundle) outputResource(kind, name, namespace string, obj runtime.O b.resourceCount++ // Convert to JSON first to preserve all fields including TypeMeta (kind, apiVersion, spec, status, etc.) - // For typed objects, TypeMeta is embedded in the struct with json:",inline" tag, - // so json.Marshal should include it. However, if TypeMeta fields are empty, - // they might be omitted due to omitempty tags. We ensure GVK is set above. jsonBytes, err := json.Marshal(obj) if err != nil { return fmt.Errorf("failed to marshal %s/%s to JSON: %w", kind, name, err) @@ -413,22 +433,10 @@ func (b *DebugBundle) outputResource(kind, name, namespace string, obj runtime.O if apiVersion != "" && apiVersion != "/" { jsonObj["apiVersion"] = apiVersion needsUpdate = true - } else { + } else if coreKinds[kind] { // Fallback: for core Kubernetes resources, use "v1" - // This handles cases where GVK is not set for core resources - coreKinds := map[string]bool{ - "Pod": true, - "PersistentVolumeClaim": true, - "PersistentVolume": true, - "Event": true, - "Service": true, - "ConfigMap": true, - "Secret": true, - } - if coreKinds[kind] { - jsonObj["apiVersion"] = coreAPIVersion - needsUpdate = true - } + jsonObj["apiVersion"] = coreAPIVersion + needsUpdate = true } } // Ensure kind is also present From 582b61a7672c4ccd2ada67f60d3a6c5cbd33c1c4 Mon Sep 17 00:00:00 2001 From: Pavel Tishkov Date: Thu, 4 Dec 2025 21:20:32 +0300 Subject: [PATCH 3/4] feat(cli): refactor Signed-off-by: Pavel Tishkov --- .../cmd/collectdebuginfo/collectdebuginfo.go | 130 ++++++++++++++++++ .../collectors.go | 41 ++---- .../internal/cmd/debugbundle/debugbundle.go | 41 ++---- src/cli/pkg/command/virtualization.go | 4 +- 4 files changed, 156 insertions(+), 60 deletions(-) create mode 100644 src/cli/internal/cmd/collectdebuginfo/collectdebuginfo.go rename src/cli/internal/cmd/{debugbundle => collectdebuginfo}/collectors.go (92%) diff --git a/src/cli/internal/cmd/collectdebuginfo/collectdebuginfo.go b/src/cli/internal/cmd/collectdebuginfo/collectdebuginfo.go new file mode 100644 index 0000000000..861660ef66 --- /dev/null +++ b/src/cli/internal/cmd/collectdebuginfo/collectdebuginfo.go @@ -0,0 +1,130 @@ +/* +Copyright 2025 Flant JSC + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance 
with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package collectdebuginfo + +import ( + "context" + "fmt" + "io" + + "github.com/spf13/cobra" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/client-go/dynamic" + + "github.com/deckhouse/virtualization/api/client/kubeclient" + "github.com/deckhouse/virtualization/src/cli/internal/clientconfig" + "github.com/deckhouse/virtualization/src/cli/internal/templates" +) + +func NewCommand() *cobra.Command { + bundle := &DebugBundle{} + cmd := &cobra.Command{ + Use: "collect-debug-info (VirtualMachine)", + Short: "Collect debug information for VM: configuration, events, and logs.", + Example: usage(), + Args: templates.ExactArgs("collect-debug-info", 1), + RunE: bundle.Run, + } + + cmd.Flags().BoolVar(&bundle.saveLogs, "with-logs", false, "Include pod logs in output") + cmd.Flags().BoolVar(&bundle.debug, "debug", false, "Enable debug output for permission errors") + cmd.SetUsageTemplate(templates.UsageTemplate()) + return cmd +} + +type DebugBundle struct { + saveLogs bool + debug bool + dynamicClient dynamic.Interface + stdout io.Writer + stderr io.Writer + resourceCount int +} + +func usage() string { + return ` # Collect debug info for VirtualMachine 'myvm': + {{ProgramName}} collect-debug-info myvm + {{ProgramName}} collect-debug-info myvm.mynamespace + {{ProgramName}} collect-debug-info myvm -n mynamespace + # Include pod logs: + {{ProgramName}} collect-debug-info --with-logs myvm` +} + +func (b *DebugBundle) Run(cmd *cobra.Command, args []string) error { + client, defaultNamespace, _, err := clientconfig.ClientAndNamespaceFromContext(cmd.Context()) + if err != nil { + return err + } + + namespace, name, err := templates.ParseTarget(args[0]) + if err != nil { + return err + } + if namespace == "" { + namespace = defaultNamespace + } + + // Get dynamic client for internal resources + config, err := clientconfig.GetRESTConfig(cmd.Context()) + if err != nil { + return fmt.Errorf("failed to get REST config: %w", err) + } + b.dynamicClient, err = dynamic.NewForConfig(config) + if err != nil { + return fmt.Errorf("failed to create dynamic client: %w", err) + } + + // Set output writers + b.stdout = cmd.OutOrStdout() + b.stderr = cmd.ErrOrStderr() + + // Collect and output resources immediately + if err := b.collectResources(cmd.Context(), client, namespace, name); err != nil { + return err + } + + return nil +} + +func (b *DebugBundle) collectResources(ctx context.Context, client kubeclient.Client, namespace, vmName string) error { + // Collect VM and core resources + if err := b.collectVMResources(ctx, client, namespace, vmName); err != nil { + return fmt.Errorf("failed to collect VM resources: %w", err) + } + + // Collect block devices + if err := b.collectBlockDevices(ctx, client, namespace, vmName); err != nil { + return fmt.Errorf("failed to collect block devices: %w", err) + } + + // Collect pods (and logs if requested) + if err := b.collectPods(ctx, client, namespace, vmName); err != nil { + return fmt.Errorf("failed to collect pods: %w", err) + } + + return nil +} + +func (b *DebugBundle) handleError(resourceType, resourceName string, err error) bool { + if 
errors.IsForbidden(err) || errors.IsUnauthorized(err) { + if b.debug { + fmt.Fprintf(b.stderr, "Warning: Skipping %s/%s: permission denied\n", resourceType, resourceName) + } + return true // Skip this resource + } + return false // Don't skip, propagate error +} diff --git a/src/cli/internal/cmd/debugbundle/collectors.go b/src/cli/internal/cmd/collectdebuginfo/collectors.go similarity index 92% rename from src/cli/internal/cmd/debugbundle/collectors.go rename to src/cli/internal/cmd/collectdebuginfo/collectors.go index 431f1d536b..9b8ff0a429 100644 --- a/src/cli/internal/cmd/debugbundle/collectors.go +++ b/src/cli/internal/cmd/collectdebuginfo/collectors.go @@ -14,7 +14,7 @@ See the License for the specific language governing permissions and limitations under the License. */ -package debugbundle +package collectdebuginfo import ( "context" @@ -37,7 +37,6 @@ import ( const ( internalAPIGroup = "internal.virtualization.deckhouse.io" internalAPIVersion = "v1" - systemNamespace = "d8-virtualization" coreAPIVersion = "v1" ) @@ -258,7 +257,7 @@ func (b *DebugBundle) collectPods(ctx context.Context, client kubeclient.Client, b.collectEvents(ctx, client, namespace, "Pod", pod.Name) if b.saveLogs { - b.collectSinglePodLogs(ctx, client, namespace, pod.Name, false) + b.collectSinglePodLogs(ctx, client, namespace, pod.Name) } } @@ -282,7 +281,7 @@ func (b *DebugBundle) collectPods(ctx context.Context, client kubeclient.Client, b.outputResource("Pod", pod.Name, namespace, &pod) b.collectEvents(ctx, client, namespace, "Pod", pod.Name) if b.saveLogs { - b.collectSinglePodLogs(ctx, client, namespace, pod.Name, false) + b.collectSinglePodLogs(ctx, client, namespace, pod.Name) } break } @@ -294,33 +293,15 @@ func (b *DebugBundle) collectPods(ctx context.Context, client kubeclient.Client, return nil } -func (b *DebugBundle) collectSystemLogs(ctx context.Context, client kubeclient.Client) error { - pods, err := client.CoreV1().Pods(systemNamespace).List(ctx, metav1.ListOptions{}) - if err != nil { - if b.handleError("Pod", systemNamespace, err) { - return nil - } - return err - } - - for _, pod := range pods.Items { - b.collectSinglePodLogs(ctx, client, systemNamespace, pod.Name, true) - } - return nil -} - -func (b *DebugBundle) collectSinglePodLogs(ctx context.Context, client kubeclient.Client, namespace, podName string, isSystem bool) { +func (b *DebugBundle) collectSinglePodLogs(ctx context.Context, client kubeclient.Client, namespace, podName string) { logPrefix := fmt.Sprintf("%s/%s", namespace, podName) - if isSystem { - logPrefix = fmt.Sprintf("system/%s/%s", namespace, podName) - } // Get current logs req := client.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{}) if logStream, err := req.Stream(ctx); err == nil { if logContent, err := io.ReadAll(logStream); err == nil { - fmt.Fprintf(b.stderr, "\n# %s\n", logPrefix) - fmt.Fprintf(b.stderr, "%s\n", string(logContent)) + fmt.Fprintf(b.stdout, "\n# %s\n", logPrefix) + fmt.Fprintf(b.stdout, "%s\n", string(logContent)) } logStream.Close() } @@ -329,8 +310,8 @@ func (b *DebugBundle) collectSinglePodLogs(ctx context.Context, client kubeclien req = client.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{Previous: true}) if logStream, err := req.Stream(ctx); err == nil { if logContent, err := io.ReadAll(logStream); err == nil { - fmt.Fprintf(b.stderr, "\n# %s (previous)\n", logPrefix) - fmt.Fprintf(b.stderr, "%s\n", string(logContent)) + fmt.Fprintf(b.stdout, "\n# %s (previous)\n", logPrefix) + fmt.Fprintf(b.stdout, "%s\n", 
string(logContent)) } logStream.Close() } @@ -460,11 +441,7 @@ func (b *DebugBundle) outputResource(kind, name, namespace string, obj runtime.O } // Output comment and full YAML resource - fmt.Fprintf(b.stdout, "# %s: %s", kind, name) - if namespace != "" { - fmt.Fprintf(b.stdout, " (namespace: %s)", namespace) - } - fmt.Fprintf(b.stdout, "\n%s", string(yamlBytes)) + fmt.Fprintf(b.stdout, "# %d. %s: %s\n%s", b.resourceCount, kind, name, string(yamlBytes)) return nil } diff --git a/src/cli/internal/cmd/debugbundle/debugbundle.go b/src/cli/internal/cmd/debugbundle/debugbundle.go index 40cb167fb6..f99c33d041 100644 --- a/src/cli/internal/cmd/debugbundle/debugbundle.go +++ b/src/cli/internal/cmd/debugbundle/debugbundle.go @@ -33,39 +33,35 @@ import ( func NewCommand() *cobra.Command { bundle := &DebugBundle{} cmd := &cobra.Command{ - Use: "debug-bundle (VirtualMachine)", - Short: "Create a debug bundle with VM configuration, events, and logs.", + Use: "collect-debug-info (VirtualMachine)", + Short: "Collect debug information for VM: configuration, events, and logs.", Example: usage(), - Args: templates.ExactArgs("debug-bundle", 1), + Args: templates.ExactArgs("collect-debug-info", 1), RunE: bundle.Run, } - cmd.Flags().BoolVar(&bundle.saveLogs, "save-logs", false, "Save pod logs to output") - cmd.Flags().BoolVar(&bundle.saveSystemLogs, "save-system-logs", false, "Save system component logs to output") + cmd.Flags().BoolVar(&bundle.saveLogs, "with-logs", false, "Include pod logs in output") cmd.Flags().BoolVar(&bundle.debug, "debug", false, "Enable debug output for permission errors") cmd.SetUsageTemplate(templates.UsageTemplate()) return cmd } type DebugBundle struct { - saveLogs bool - saveSystemLogs bool - debug bool - dynamicClient dynamic.Interface - stdout io.Writer - stderr io.Writer - resourceCount int + saveLogs bool + debug bool + dynamicClient dynamic.Interface + stdout io.Writer + stderr io.Writer + resourceCount int } func usage() string { - return ` # Create debug bundle for VirtualMachine 'myvm': - {{ProgramName}} debug-bundle myvm - {{ProgramName}} debug-bundle myvm.mynamespace - {{ProgramName}} debug-bundle myvm -n mynamespace + return ` # Collect debug info for VirtualMachine 'myvm': + {{ProgramName}} collect-debug-info myvm + {{ProgramName}} collect-debug-info myvm.mynamespace + {{ProgramName}} collect-debug-info myvm -n mynamespace # Include pod logs: - {{ProgramName}} debug-bundle --save-logs myvm - # Include system component logs: - {{ProgramName}} debug-bundle --save-system-logs myvm` + {{ProgramName}} collect-debug-info --with-logs myvm` } func (b *DebugBundle) Run(cmd *cobra.Command, args []string) error { @@ -120,13 +116,6 @@ func (b *DebugBundle) collectResources(ctx context.Context, client kubeclient.Cl return fmt.Errorf("failed to collect pods: %w", err) } - // Collect system logs if requested - if b.saveSystemLogs { - if err := b.collectSystemLogs(ctx, client); err != nil { - return fmt.Errorf("failed to collect system logs: %w", err) - } - } - return nil } diff --git a/src/cli/pkg/command/virtualization.go b/src/cli/pkg/command/virtualization.go index ac884a8f6f..539ae192c4 100644 --- a/src/cli/pkg/command/virtualization.go +++ b/src/cli/pkg/command/virtualization.go @@ -31,8 +31,8 @@ import ( "github.com/deckhouse/virtualization/api/client/kubeclient" "github.com/deckhouse/virtualization/src/cli/internal/clientconfig" + "github.com/deckhouse/virtualization/src/cli/internal/cmd/collectdebuginfo" "github.com/deckhouse/virtualization/src/cli/internal/cmd/console" - 
"github.com/deckhouse/virtualization/src/cli/internal/cmd/debugbundle" "github.com/deckhouse/virtualization/src/cli/internal/cmd/lifecycle" "github.com/deckhouse/virtualization/src/cli/internal/cmd/portforward" "github.com/deckhouse/virtualization/src/cli/internal/cmd/scp" @@ -85,7 +85,7 @@ func NewCommand(programName string) *cobra.Command { virtCmd.AddCommand( console.NewCommand(), - debugbundle.NewCommand(), + collectdebuginfo.NewCommand(), vnc.NewCommand(), portforward.NewCommand(), ssh.NewCommand(), From 048f283f508dbf898dc0100f2bfdce119420973b Mon Sep 17 00:00:00 2001 From: Pavel Tishkov Date: Thu, 4 Dec 2025 21:58:19 +0300 Subject: [PATCH 4/4] feat(cli): refactor Signed-off-by: Pavel Tishkov --- .../cmd/collectdebuginfo/collectors.go | 156 +++++++----------- .../internal/cmd/debugbundle/debugbundle.go | 130 --------------- 2 files changed, 58 insertions(+), 228 deletions(-) delete mode 100644 src/cli/internal/cmd/debugbundle/debugbundle.go diff --git a/src/cli/internal/cmd/collectdebuginfo/collectors.go b/src/cli/internal/cmd/collectdebuginfo/collectors.go index 9b8ff0a429..df817c3975 100644 --- a/src/cli/internal/cmd/collectdebuginfo/collectors.go +++ b/src/cli/internal/cmd/collectdebuginfo/collectors.go @@ -34,22 +34,15 @@ import ( "github.com/deckhouse/virtualization/api/core/v1alpha2" ) -const ( - internalAPIGroup = "internal.virtualization.deckhouse.io" - internalAPIVersion = "v1" - coreAPIVersion = "v1" -) - var coreKinds = map[string]bool{ "Pod": true, "PersistentVolumeClaim": true, "PersistentVolume": true, "Event": true, - "Service": true, - "ConfigMap": true, - "Secret": true, } +// Resource collection functions + func (b *DebugBundle) collectVMResources(ctx context.Context, client kubeclient.Client, namespace, vmName string) error { // Get VM vm, err := client.VirtualMachines(namespace).Get(ctx, vmName, metav1.GetOptions{}) @@ -229,7 +222,6 @@ func (b *DebugBundle) collectBlockDevice(ctx context.Context, client kubeclient. 
return err } b.outputResource("ClusterVirtualImage", name, "", cvi) - // ClusterVirtualImage doesn't have events in namespace default: return fmt.Errorf("unknown block device kind: %s", kind) @@ -293,8 +285,29 @@ func (b *DebugBundle) collectPods(ctx context.Context, client kubeclient.Client, return nil } +// Event collection functions + +func (b *DebugBundle) collectEvents(ctx context.Context, client kubeclient.Client, namespace, resourceType, resourceName string) { + events, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{ + FieldSelector: fmt.Sprintf("involvedObject.name=%s", resourceName), + }) + if err != nil { + if b.handleError("Event", resourceName, err) { + return + } + return + } + + // Add each event individually to preserve TypeMeta + for i := range events.Items { + b.outputResource("Event", fmt.Sprintf("%s-%s-%d", strings.ToLower(resourceType), resourceName, i), namespace, &events.Items[i]) + } +} + +// Log collection functions + func (b *DebugBundle) collectSinglePodLogs(ctx context.Context, client kubeclient.Client, namespace, podName string) { - logPrefix := fmt.Sprintf("%s/%s", namespace, podName) + logPrefix := fmt.Sprintf("logs %s/%s", namespace, podName) // Get current logs req := client.CoreV1().Pods(namespace).GetLogs(podName, &corev1.PodLogOptions{}) @@ -317,33 +330,14 @@ func (b *DebugBundle) collectSinglePodLogs(ctx context.Context, client kubeclien } } -func (b *DebugBundle) collectEvents(ctx context.Context, client kubeclient.Client, namespace, resourceType, resourceName string) { - events, err := client.CoreV1().Events(namespace).List(ctx, metav1.ListOptions{ - FieldSelector: fmt.Sprintf("involvedObject.name=%s", resourceName), - }) - if err != nil { - if b.handleError("Event", resourceName, err) { - return - } - return - } - - // Add each event individually to preserve TypeMeta - for i := range events.Items { - b.outputResource("Event", fmt.Sprintf("%s-%s-%d", strings.ToLower(resourceType), resourceName, i), namespace, &events.Items[i]) - } -} - -func (b *DebugBundle) getInternalGVR(resource string) schema.GroupVersionResource { - return schema.GroupVersionResource{ - Group: internalAPIGroup, - Version: internalAPIVersion, - Resource: resource, - } -} +// Helper functions func (b *DebugBundle) getInternalResource(ctx context.Context, resource, namespace, name string) (*unstructured.Unstructured, error) { - obj, err := b.dynamicClient.Resource(b.getInternalGVR(resource)).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) + obj, err := b.dynamicClient.Resource(schema.GroupVersionResource{ + Group: "internal.virtualization.deckhouse.io", + Version: "v1", + Resource: resource, + }).Namespace(namespace).Get(ctx, name, metav1.GetOptions{}) if err != nil { return nil, err } @@ -351,7 +345,11 @@ func (b *DebugBundle) getInternalResource(ctx context.Context, resource, namespa } func (b *DebugBundle) getInternalResourceList(ctx context.Context, resource, namespace string) ([]*unstructured.Unstructured, error) { - list, err := b.dynamicClient.Resource(b.getInternalGVR(resource)).Namespace(namespace).List(ctx, metav1.ListOptions{}) + list, err := b.dynamicClient.Resource(schema.GroupVersionResource{ + Group: "internal.virtualization.deckhouse.io", + Version: "v1", + Resource: resource, + }).Namespace(namespace).List(ctx, metav1.ListOptions{}) if err != nil { return nil, err } @@ -364,83 +362,45 @@ func (b *DebugBundle) getInternalResourceList(ctx context.Context, resource, nam } func (b *DebugBundle) outputResource(kind, name, namespace string, 
obj runtime.Object) error { - unstructuredObj, isUnstructured := obj.(*unstructured.Unstructured) - // Output separator if not first resource if b.resourceCount > 0 { fmt.Fprintf(b.stdout, "\n---\n") } b.resourceCount++ - // Convert to JSON first to preserve all fields including TypeMeta (kind, apiVersion, spec, status, etc.) - jsonBytes, err := json.Marshal(obj) - if err != nil { - return fmt.Errorf("failed to marshal %s/%s to JSON: %w", kind, name, err) + // Ensure Kind is set from input if missing + gvk := obj.GetObjectKind().GroupVersionKind() + if gvk.Kind == "" { + gvk.Kind = kind + obj.GetObjectKind().SetGroupVersionKind(gvk) } - // Always parse JSON and ensure apiVersion and kind are present - // This handles cases where TypeMeta might not be serialized properly - var jsonObj map[string]interface{} - if err := json.Unmarshal(jsonBytes, &jsonObj); err == nil { - needsUpdate := false - // If apiVersion is missing, try to get it from the object itself - if _, ok := jsonObj["apiVersion"]; !ok { - var apiVersion string - - // For unstructured objects, get apiVersion directly - if isUnstructured { - apiVersion = unstructuredObj.GetAPIVersion() - } else { - // For typed objects, get apiVersion from GVK - // Objects from cluster should have GVK set correctly - gvk := obj.GetObjectKind().GroupVersionKind() - - // If GVK is empty, try to get it from scheme - if gvk.Kind == "" || (gvk.Group == "" && gvk.Version == "") { - // Try to get GVK from scheme - if gvks, _, err := kubeclient.Scheme.ObjectKinds(obj); err == nil && len(gvks) > 0 { - gvk = gvks[0] - // Set GVK on the object for future use - obj.GetObjectKind().SetGroupVersionKind(gvk) - } - } - - // Use GroupVersion().String() which automatically formats as "group/version" or "version" - // This works for both custom resources (group/version) and core resources (version only) - apiVersion = gvk.GroupVersion().String() - } - - // If we got a valid apiVersion, use it - if apiVersion != "" && apiVersion != "/" { - jsonObj["apiVersion"] = apiVersion - needsUpdate = true - } else if coreKinds[kind] { - // Fallback: for core Kubernetes resources, use "v1" - jsonObj["apiVersion"] = coreAPIVersion - needsUpdate = true - } - } - // Ensure kind is also present - if _, ok := jsonObj["kind"]; !ok { - jsonObj["kind"] = kind - needsUpdate = true - } - // Re-marshal with apiVersion and kind if needed - if needsUpdate { - jsonBytes, err = json.Marshal(jsonObj) - if err != nil { - return fmt.Errorf("failed to re-marshal %s/%s to JSON: %w", kind, name, err) - } + // If GroupVersion is missing/empty, try to get from scheme + if gvk.GroupVersion().Empty() { + gvks, _, err := kubeclient.Scheme.ObjectKinds(obj) + if err == nil && len(gvks) > 0 { + gvk = gvks[0] + obj.GetObjectKind().SetGroupVersionKind(gvk) + } else if coreKinds[kind] { + // Fallback for core Kubernetes resources if scheme doesn't know about them + gvk = schema.GroupVersionKind{Group: "", Version: "v1", Kind: kind} + obj.GetObjectKind().SetGroupVersionKind(gvk) } } - // Convert JSON to YAML - this preserves all fields + // Marshal to JSON (now with TypeMeta if set) + jsonBytes, err := json.Marshal(obj) + if err != nil { + return fmt.Errorf("failed to marshal %s/%s to JSON: %w", kind, name, err) + } + + // Convert to YAML yamlBytes, err := yaml.JSONToYAML(jsonBytes) if err != nil { return fmt.Errorf("failed to convert %s/%s to YAML: %w", kind, name, err) } - // Output comment and full YAML resource + // Output fmt.Fprintf(b.stdout, "# %d. 
%s: %s\n%s", b.resourceCount, kind, name, string(yamlBytes)) return nil diff --git a/src/cli/internal/cmd/debugbundle/debugbundle.go b/src/cli/internal/cmd/debugbundle/debugbundle.go deleted file mode 100644 index f99c33d041..0000000000 --- a/src/cli/internal/cmd/debugbundle/debugbundle.go +++ /dev/null @@ -1,130 +0,0 @@ -/* -Copyright 2025 Flant JSC - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package debugbundle - -import ( - "context" - "fmt" - "io" - - "github.com/spf13/cobra" - "k8s.io/apimachinery/pkg/api/errors" - "k8s.io/client-go/dynamic" - - "github.com/deckhouse/virtualization/api/client/kubeclient" - "github.com/deckhouse/virtualization/src/cli/internal/clientconfig" - "github.com/deckhouse/virtualization/src/cli/internal/templates" -) - -func NewCommand() *cobra.Command { - bundle := &DebugBundle{} - cmd := &cobra.Command{ - Use: "collect-debug-info (VirtualMachine)", - Short: "Collect debug information for VM: configuration, events, and logs.", - Example: usage(), - Args: templates.ExactArgs("collect-debug-info", 1), - RunE: bundle.Run, - } - - cmd.Flags().BoolVar(&bundle.saveLogs, "with-logs", false, "Include pod logs in output") - cmd.Flags().BoolVar(&bundle.debug, "debug", false, "Enable debug output for permission errors") - cmd.SetUsageTemplate(templates.UsageTemplate()) - return cmd -} - -type DebugBundle struct { - saveLogs bool - debug bool - dynamicClient dynamic.Interface - stdout io.Writer - stderr io.Writer - resourceCount int -} - -func usage() string { - return ` # Collect debug info for VirtualMachine 'myvm': - {{ProgramName}} collect-debug-info myvm - {{ProgramName}} collect-debug-info myvm.mynamespace - {{ProgramName}} collect-debug-info myvm -n mynamespace - # Include pod logs: - {{ProgramName}} collect-debug-info --with-logs myvm` -} - -func (b *DebugBundle) Run(cmd *cobra.Command, args []string) error { - client, defaultNamespace, _, err := clientconfig.ClientAndNamespaceFromContext(cmd.Context()) - if err != nil { - return err - } - - namespace, name, err := templates.ParseTarget(args[0]) - if err != nil { - return err - } - if namespace == "" { - namespace = defaultNamespace - } - - // Get dynamic client for internal resources - config, err := clientconfig.GetRESTConfig(cmd.Context()) - if err != nil { - return fmt.Errorf("failed to get REST config: %w", err) - } - b.dynamicClient, err = dynamic.NewForConfig(config) - if err != nil { - return fmt.Errorf("failed to create dynamic client: %w", err) - } - - // Set output writers - b.stdout = cmd.OutOrStdout() - b.stderr = cmd.ErrOrStderr() - - // Collect and output resources immediately - if err := b.collectResources(cmd.Context(), client, namespace, name); err != nil { - return err - } - - return nil -} - -func (b *DebugBundle) collectResources(ctx context.Context, client kubeclient.Client, namespace, vmName string) error { - // Collect VM and core resources - if err := b.collectVMResources(ctx, client, namespace, vmName); err != nil { - return fmt.Errorf("failed to collect VM resources: %w", err) - } - - // Collect 
block devices - if err := b.collectBlockDevices(ctx, client, namespace, vmName); err != nil { - return fmt.Errorf("failed to collect block devices: %w", err) - } - - // Collect pods (and logs if requested) - if err := b.collectPods(ctx, client, namespace, vmName); err != nil { - return fmt.Errorf("failed to collect pods: %w", err) - } - - return nil -} - -func (b *DebugBundle) handleError(resourceType, resourceName string, err error) bool { - if errors.IsForbidden(err) || errors.IsUnauthorized(err) { - if b.debug { - fmt.Fprintf(b.stderr, "Warning: Skipping %s/%s: permission denied\n", resourceType, resourceName) - } - return true // Skip this resource - } - return false // Don't skip, propagate error -}
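
A quick illustration of how the resulting subcommand is meant to be driven (a sketch only: the real program name is substituted from {{ProgramName}} by the CLI templates, so `d8 virtualization` below is just a placeholder for whatever binary embeds this command, and the VM/namespace names are made up):

    # stream the full resource/event dump for VM "myvm" to a single file
    d8 virtualization collect-debug-info myvm -n mynamespace > myvm-debug-info.yaml

    # same, but also interleave current and previous pod logs into the output
    d8 virtualization collect-debug-info --with-logs myvm.mynamespace > myvm-debug-info.yaml

Since the collectors write everything to stdout as `---`-separated YAML documents (with permission-denied resources silently skipped unless --debug is set), redirecting stdout to one file is the expected way to capture the bundle.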