diff --git a/Containerfile.agent b/Containerfile.agent
index 9399180..967a55c 100644
--- a/Containerfile.agent
+++ b/Containerfile.agent
@@ -15,7 +15,7 @@ RUN go mod download
 COPY . .
 
 USER 0
-RUN CGO_ENABLED=0 GOOS=linux GOARCH=amd64 go build -buildvcs=false -o /planner-agent cmd/planner-agent/main.go
+RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -buildvcs=false -o /planner-agent cmd/planner-agent/main.go
 
 FROM registry.access.redhat.com/ubi9/ubi-micro
 
diff --git a/Containerfile.collector b/Containerfile.collector
deleted file mode 100644
index c3b3a79..0000000
--- a/Containerfile.collector
+++ /dev/null
@@ -1,25 +0,0 @@
-# Builder container
-FROM registry.access.redhat.com/ubi9/go-toolset as builder
-
-WORKDIR /app
-COPY go.mod go.sum ./
-RUN go mod download
-
-COPY . .
-
-USER 0
-RUN CGO_ENABLED=1 GOOS=linux go build -buildvcs=false -o /collector cmd/collector/main.go
-
-FROM registry.access.redhat.com/ubi9/ubi-micro
-
-WORKDIR /app
-
-COPY --from=builder /collector /app/
-
-# Use non-root user
-RUN chown -R 1001:0 /app
-USER 1001
-
-# Run the server
-EXPOSE 3333
-ENTRYPOINT ["/app/collector"]
diff --git a/Makefile b/Makefile
index da5f023..1cb2cc1 100644
--- a/Makefile
+++ b/Makefile
@@ -7,7 +7,6 @@ GO_CACHE := -v $${HOME}/go/migration-planner-go-cache:/opt/app-root/src/go:Z -v
 TIMEOUT ?= 30m
 VERBOSE ?= false
 MIGRATION_PLANNER_AGENT_IMAGE ?= quay.io/kubev2v/migration-planner-agent
-MIGRATION_PLANNER_COLLECTOR_IMAGE ?= quay.io/kubev2v/migration-planner-collector
 MIGRATION_PLANNER_API_IMAGE ?= quay.io/kubev2v/migration-planner-api
 MIGRATION_PLANNER_UI_IMAGE ?= quay.io/kubev2v/migration-planner-ui
 DOWNLOAD_RHCOS ?= true
@@ -81,23 +80,18 @@ build-api: bin
 bin/.migration-planner-agent-container: bin Containerfile.agent go.mod go.sum $(GO_FILES)
 	podman build -f Containerfile.agent -t $(MIGRATION_PLANNER_AGENT_IMAGE):latest
 
-bin/.migration-planner-collector-container: bin Containerfile.collector go.mod go.sum $(GO_FILES)
-	podman build -f Containerfile.collector -t $(MIGRATION_PLANNER_COLLECTOR_IMAGE):latest
-
 bin/.migration-planner-api-container: bin Containerfile.api go.mod go.sum $(GO_FILES)
 	podman build -f Containerfile.api -t $(MIGRATION_PLANNER_API_IMAGE):latest
 
 migration-planner-api-container: bin/.migration-planner-api-container
 
-migration-planner-collector-container: bin/.migration-planner-collector-container
-
 migration-planner-agent-container: bin/.migration-planner-agent-container
 
-build-containers: migration-planner-api-container migration-planner-agent-container migration-planner-collector-container
+build-containers: migration-planner-api-container migration-planner-agent-container
 
 .PHONY: build-containers
 
 push-containers: build-containers
 	podman push $(MIGRATION_PLANNER_API_IMAGE):latest
-	podman push $(MIGRATION_PLANNER_COLLECTOR_IMAGE):latest
 	podman push $(MIGRATION_PLANNER_AGENT_IMAGE):latest
 
 deploy-on-openshift:
diff --git a/cmd/collector/README.md b/cmd/planner-agent/COLLECTOR.md
similarity index 56%
rename from cmd/collector/README.md
rename to cmd/planner-agent/COLLECTOR.md
index 4e2e978..1fe22be 100644
--- a/cmd/collector/README.md
+++ b/cmd/planner-agent/COLLECTOR.md
@@ -4,11 +4,29 @@ To run the collector localy here are the steps.
 
 ## Prepare
 Prepare the dependencies.
 
+### Configuration
+Create the planner-agent configuration file:
+
+```
+$ mkdir /tmp/config
+$ mkdir /tmp/data
+$ cat << EOF > ~/.planner-agent/config.yaml
+config-dir: /tmp/config
+data-dir: /tmp/data
+log-level: debug
+source-id: 9195e61d-e56d-407d-8b29-ff2fb7986928
+update-interval: 5s
+planner-service:
+  service:
+    server: http://127.0.0.1:7443
+EOF
+```
+
 ### Credentials
 Create VMware credentials file.
 
 ```
-cat << EOF > /tmp/creds.json
+cat << EOF > /tmp/data/creds.json
 {
   "username": "user@example.com",
   "password": "userpassword",
   "
@@ -28,7 +46,7 @@ podman run -p 8181:8181 -d --name opa --entrypoint '/usr/bin/opa' quay.io/kubev2
 Build & run the collector code specifying credentials file as first argument and as second path to invetory file, where data should be written.
 
 ```
-go run cmd/collector/main.go /tmp/creds.json /tmp/inventory.json
+go run cmd/planner-agent/main.go -config ~/.planner-agent/config.yaml
 ```
 
-Explore `/tmp/inventory.json`
+Explore `/tmp/data/inventory.json`
diff --git a/data/ignition.template b/data/ignition.template
index 0dfba3f..0fb622e 100644
--- a/data/ignition.template
+++ b/data/ignition.template
@@ -22,19 +22,19 @@ storage:
       name: core
     group:
      name: core
-  - path: /home/core/vol
+  - path: /home/core/.migration-planner
     overwrite: true
     user:
       name: core
     group:
       name: core
-  - path: /home/core/vol/config
+  - path: /home/core/.migration-planner/config
     overwrite: true
     user:
       name: core
     group:
       name: core
-  - path: /home/core/vol/data
+  - path: /home/core/.migration-planner/data
     overwrite: true
     user:
       name: core
@@ -46,7 +46,7 @@ storage:
       contents:
         inline: |
           PasswordAuthentication yes
-  - path: /home/core/vol/config.yaml
+  - path: /home/core/.migration-planner/config.yaml
     contents:
       inline: |
         config-dir: /agent/config
@@ -63,7 +63,7 @@ storage:
       name: core
     group:
       name: core
-  - path: /home/core/.config/containers/systemd/collector.network
+  - path: /home/core/.config/containers/systemd/agent.network
     contents:
       inline: |
        [Network]
@@ -71,42 +71,13 @@ storage:
       name: core
     group:
       name: core
-  - path: /home/core/.config/containers/systemd/planner.volume
-    contents:
-      inline: |
-        [Volume]
-        VolumeName=planner.volume
-    user:
-      name: core
-    group:
-      name: core
-  - path: /home/core/.config/containers/systemd/planner-setup.container
-    mode: 0644
-    contents:
-      inline: |
-        [Unit]
-        Description=Prepare data volume for the container
-        Before=planner-agent.service
-
-        [Container]
-        Image=registry.access.redhat.com/ubi9/ubi-micro
-        Exec=sh -c "cp -r /mnt/* /agent/ && chmod -R a+rwx /agent"
-        Volume=planner.volume:/agent
-        Volume=/home/core/vol:/mnt:Z
-
-        [Service]
-        Type=oneshot
-        RemainAfterExit=yes
-
-        [Install]
-        WantedBy=multi-user.target default.target
   - path: /home/core/.config/containers/systemd/planner-agent.container
     mode: 0644
     contents:
       inline: |
         [Unit]
         Description=Planner agent quadlet
-        Wants=planner-setup.service
+        Wants=planner-agent-opa.service
 
         [Container]
         Image={{.MigrationPlannerAgentImage}}
@@ -114,7 +85,10 @@ storage:
         AutoUpdate=registry
         Exec= -config /agent/config.yaml
         PublishPort=3333:3333
-        Volume=planner.volume:/agent
+        Volume=/home/core/.migration-planner:/agent:Z
+        Environment=OPA_SERVER=opa:8181
+        Network=agent.network
+        UserNS=keep-id:uid=1001
 
         [Install]
         WantedBy=multi-user.target default.target
@@ -123,8 +97,8 @@ storage:
     contents:
       inline: |
         [Unit]
-        Description=Collector quadlet
-        Before=planner-agent-collector.service
+        Description=OPA quadlet
+        Before=planner-agent.service
 
         [Container]
         ContainerName=opa
        Entrypoint=/usr/bin/opa
        PublishPort=8181:8181
         Exec=run --server /usr/share/opa/policies
-        Network=collector.network
-
-        [Install]
-        WantedBy=multi-user.target default.target
-
-  - path: /home/core/.config/containers/systemd/planner-agent-collector.container
-    mode: 0644
-    contents:
-      inline: |
-        [Unit]
-        Description=Collector quadlet
-        Wants=planner-agent-opa.service
-
-        [Container]
-        Image={{.MigrationPlannerCollectorImage}}
-        ContainerName=migration-planner-collector
-        AutoUpdate=registry
-        Exec=/vol/data/credentials.json /vol/data/inventory.json
-        Volume=planner.volume:/vol
-        Environment=OPA_SERVER=opa:8181
-        Network=collector.network
-
-        [Service]
-        Restart=on-failure
+        Network=agent.network
 
         [Install]
         WantedBy=multi-user.target default.target
diff --git a/doc/agentvm.md b/doc/agentvm.md
index b46e34b..686aa9c 100644
--- a/doc/agentvm.md
+++ b/doc/agentvm.md
@@ -1,25 +1,19 @@
 # Agent virtual machine
 The agent, based on Red Hat CoreOS (RHCOS), communicates with the Agent Service and reports its status.
-The agent virtual machine is initialized using ignition, which configures multiple containers that run as systemd services. Each of these services is dedicated to a specific function.
+The agent virtual machine is initialized using ignition, which configures a container that runs as a systemd service.
 
 ## Systemd services
 Follows the list of systemd services that can be found on agent virtual machine. All of the services
-are defined as quadlets. Quadlet configuration can be found in the [ignition template file](../data/config.ign.template).
-Agent dockerfile can be found [here](../Containerfile.agent), the collector containerfile is [here](../Containerfile.collector).
-
-### planner-setup
-Planner-setup service is responsible for inicializing the volume with data, that are shared between `planner-agent` and `planner-agent-collector`.
+are defined as quadlets. Quadlet configuration can be found in the [ignition template file](../data/ignition.template).
+The agent Containerfile can be found [here](../Containerfile.agent).
 
 ### planner-agent
-Planner-agent is a service that reports the status to the Agent service. The URL of the Agent service is configured in `$HOME/vol/config.yaml` file, which is injected via ignition.
+Planner-agent is a service that reports the status to the Agent service. The URL of the Agent service is configured in the `$HOME/.migration-planner/config.yaml` file, which is injected via ignition.
 
-Planner-agent contains web application that is exposed via port 3333. Once user access the web app and enter the credentials of the vCenter, `credentials.json` file is created in the shared volume, and `planner-agent-collector` can be spawned.
+Planner-agent contains a web application that is exposed on port 3333. Once the user accesses the web app and enters the vCenter credentials, a `credentials.json` file is created and a goroutine is started that fetches the data from vCenter. The data is stored in the `inventory.json` file. Once the agent notices the file, it sends the data over to the Agent service.
 
 ### planner-agent-opa
-Planner-agent-opa is a service that re-uses [forklift validation](https://github.com/kubev2v/forklift/blob/main/validation/README.adoc) container. The forklift validation container is responsible for vCenter data validation. When `planner-agent-collector` fetch vCenter data it's validated against the OPA server and report is shared back to Agent Service.
-
-### planner-agent-collector
-Planner-agent-collector service waits until user enter vCenter credentials, once credentials are entered the vCenter data are collected. The data are stored in `$HOME/vol/data/inventory.json`. Once `invetory.json` is created `planner-agent` service send the data over to Agent service.
+Planner-agent-opa is a service that re-uses the [forklift validation](https://github.com/kubev2v/forklift/blob/main/validation/README.adoc) container. The forklift validation container is responsible for vCenter data validation. When `planner-agent` fetches vCenter data, it is validated against the OPA server and the report is shared back to the Agent Service.
 
 ### podman-auto-update
 Podman auto update is responsible for updating the image of containers in case there is a new release of the image. We use default `podman-auto-update.timer`, which executes `podman-auto-update` every 24hours.
@@ -32,36 +26,21 @@ Usefull commands to troubleshoot Agent VM. Note that all the containers are runn
 $ podman ps
 ```
 
-### Checking the status of all our services
-```
-$ systemctl --user status planner-*
-```
-
-### Inspecting the shared volume
-We create a shared volume between containers, so we can share information between collector and agent container.
-In order to expore the data stored in the volume find the mountpoint of the volume:
-```
-$ podman volume inspect planner.volume | jq .[0].Mountpoint
-```
-
-And then you can explore relevant data. Like `config.yaml`, `credentials.json`, `inventory.json`, etc.
+### Checking the status of the planner-agent service
 ```
-$ ls /var/home/core/.local/share/containers/storage/volumes/planner.volume/_data
-$ cat /var/home/core/.local/share/containers/storage/volumes/planner.volume/_data/config.yaml
-$ cat /var/home/core/.local/share/containers/storage/volumes/planner.volume/_data/data/credentials.json
-$ cat /var/home/core/.local/share/containers/storage/volumes/planner.volume/_data/data/inventory.json
+$ systemctl --user status planner-agent
 ```
 
 ### Inspecting the host directory with data
-The ignition create a `vol` directory in `core` user home directory.
+The ignition creates a `.migration-planner` directory in the `core` user home directory.
 This directory should contain all relevant data, so in order to find misconfiguration please search in this directory.
 ```
-$ ls -l vol
+$ ls -l .migration-planner
 ```
 
 ### Check logs of the services
 ```
-$ journalctl --user -f -u planner-*
+$ journalctl --user -f -u planner-agent
 ```
 
 ### Status is `Not connected` after VM is booted.
diff --git a/doc/deployment.md b/doc/deployment.md
index 544c1e7..c3fb720 100644
--- a/doc/deployment.md
+++ b/doc/deployment.md
@@ -31,8 +31,6 @@ Agent images are defined in the ignition file.
 So in order to modify the images
 ```
 env:
-  - name: MIGRATION_PLANNER_COLLECTOR_IMAGE
-    value: quay.io/$USER/migration-planner-collector
   - name: MIGRATION_PLANNER_AGENT_IMAGE
     value: quay.io/$USER/migration-planner-agent
-```
\ No newline at end of file
+```
diff --git a/internal/agent/agent.go b/internal/agent/agent.go
index a716c12..69419d2 100644
--- a/internal/agent/agent.go
+++ b/internal/agent/agent.go
@@ -92,6 +92,9 @@ func (a *Agent) Run(ctx context.Context) error {
 	}
 	healthChecker.Start(healthCheckCh)
 
+	collector := NewCollector(a.log, a.config.DataDir)
+	go collector.collect()
+
 	inventoryUpdater := NewInventoryUpdater(a.log, a.config, client)
 	inventoryUpdater.UpdateServiceWithInventory(ctx)
 
diff --git a/cmd/collector/main.go b/internal/agent/collector.go
similarity index 87%
rename from cmd/collector/main.go
rename to internal/agent/collector.go
index 8f22b7e..eb8bf99 100644
--- a/cmd/collector/main.go
+++ b/internal/agent/collector.go
@@ -1,11 +1,10 @@
-package main
+package agent
 
 import (
 	"bytes"
 	"encoding/json"
 	"fmt"
 	"io"
-	"log"
 	"math"
 	"net/http"
 	"os"
@@ -21,113 +20,112 @@ import (
 	libmodel "github.com/konveyor/forklift-controller/pkg/lib/inventory/model"
 	apiplanner "github.com/kubev2v/migration-planner/api/v1alpha1"
 	"github.com/kubev2v/migration-planner/internal/util"
+	"github.com/kubev2v/migration-planner/pkg/log"
 	core "k8s.io/api/core/v1"
 	meta "k8s.io/apimachinery/pkg/apis/meta/v1"
 )
 
-type VCenterCreds struct {
-	Url      string `json:"url"`
-	Username string `json:"username"`
-	Password string `json:"password"`
+type Collector struct {
+	log     *log.PrefixLogger
+	dataDir string
 }
 
-func main() {
-	logger := log.New(os.Stdout, "*********** Collector: ", log.Ldate|log.Ltime|log.Lshortfile)
-	// Parse command-line arguments
-	if len(os.Args) < 3 {
-		fmt.Println("Usage: collector ")
-		os.Exit(1)
+func NewCollector(log *log.PrefixLogger, dataDir string) *Collector {
+	return &Collector{
+		log:     log,
+		dataDir: dataDir,
 	}
-	credsFile := os.Args[1]
-	outputFile := os.Args[2]
+}
+
+func (c *Collector) collect() {
+	credentialsFilePath := filepath.Join(c.dataDir, CredentialsFile)
 
-	logger.Println("Wait for credentials")
-	waitForFile(credsFile)
+	c.log.Infof("Wait for credentials")
+	waitForFile(credentialsFilePath)
 
-	logger.Println("Load credentials from file")
-	credsData, err := os.ReadFile(credsFile)
+	credsData, err := os.ReadFile(credentialsFilePath)
 	if err != nil {
-		fmt.Printf("Error reading credentials file: %v\n", err)
-		os.Exit(1)
+		c.log.Errorf("Error reading credentials file: %v\n", err)
+		return
 	}
 
-	var creds VCenterCreds
+	var creds Credentials
 	if err := json.Unmarshal(credsData, &creds); err != nil {
-		fmt.Printf("Error parsing credentials JSON: %v\n", err)
-		os.Exit(1)
+		c.log.Errorf("Error parsing credentials JSON: %v\n", err)
+		return
 	}
 
 	opaServer := util.GetEnv("OPA_SERVER", "127.0.0.1:8181")
 
-	logger.Println("Create Provider")
+	c.log.Infof("Create Provider")
 	provider := getProvider(creds)
 
-	logger.Println("Create Secret")
+	c.log.Infof("Create Secret")
 	secret := getSecret(creds)
 
-	logger.Println("Check if opaServer is responding")
+	c.log.Infof("Check if opaServer is responding")
 	resp, err := http.Get("http://" + opaServer + "/health")
 	if err != nil || resp.StatusCode != http.StatusOK {
-		fmt.Println("OPA server " + opaServer + " is not responding")
+		c.log.Errorf("OPA server %s is not responding", opaServer)
 		return
 	}
 	defer resp.Body.Close()
 
-	logger.Println("Create DB")
+	c.log.Infof("Create DB")
 	db, err := createDB(provider)
 	if err != nil {
-		fmt.Println("Error creating DB.", err)
+		c.log.Errorf("Error creating DB: %s", err)
 		return
 	}
 
-	logger.Println("vSphere collector")
+	c.log.Infof("vSphere collector")
 	collector, err := createCollector(db, provider, secret)
 	if err != nil {
-		fmt.Println("Error creating collector.", err)
+		c.log.Errorf("Error creating collector: %s", err)
 		return
 	}
 	defer collector.DB().Close(true)
 	defer collector.Shutdown()
 
-	logger.Println("List VMs")
+	c.log.Infof("List VMs")
 	vms := &[]vspheremodel.VM{}
 	err = collector.DB().List(vms, libmodel.FilterOptions{Detail: 1})
 	if err != nil {
-		fmt.Println(err)
+		c.log.Errorf("Error listing database: %s", err)
 		return
 	}
 
-	logger.Println("List Hosts")
+	c.log.Infof("List Hosts")
 	hosts := &[]vspheremodel.Host{}
 	err = collector.DB().List(hosts, libmodel.FilterOptions{Detail: 1})
 	if err != nil {
-		fmt.Println(err)
+		c.log.Errorf("Error listing database: %s", err)
 		return
 	}
 
-	logger.Println("List Clusters")
+	c.log.Infof("List Clusters")
 	clusters := &[]vspheremodel.Cluster{}
 	err = collector.DB().List(clusters, libmodel.FilterOptions{Detail: 1})
 	if err != nil {
-		fmt.Println(err)
+		c.log.Errorf("Error listing database: %s", err)
 		return
 	}
 
-	logger.Println("Create inventory")
+	c.log.Infof("Create inventory")
 	inv := createBasicInventoryObj(vms, collector, hosts, clusters)
 
-	logger.Println("Run the validation of VMs")
+	c.log.Infof("Run the validation of VMs")
 	vms, err = validation(vms, opaServer)
 	if err != nil {
-		fmt.Println(err)
+		c.log.Errorf("Error running validation: %s", err)
 		return
 	}
 
-	logger.Println("Fill the inventory object with more data")
+	c.log.Infof("Fill the inventory object with more data")
 	fillInventoryObjectWithMoreData(vms, inv)
 
-	logger.Println("Write the inventory to output file")
-	if err := createOuput(outputFile, inv); err != nil {
-		fmt.Println("Error writing output:", err)
+	c.log.Infof("Write the inventory to output file")
+	if err := createOuput(filepath.Join(c.dataDir, InventoryFile), inv); err != nil {
+		c.log.Errorf("Error writing output file: %s", err)
 		return
 	}
 }
@@ -214,17 +212,17 @@ func createBasicInventoryObj(vms *[]vspheremodel.VM, collector *vsphere.Collecto
 	}
 }
 
-func getProvider(creds VCenterCreds) *api.Provider {
+func getProvider(creds Credentials) *api.Provider {
 	vsphereType := api.VSphere
 	return &api.Provider{
 		Spec: api.ProviderSpec{
-			URL:  creds.Url,
+			URL:  creds.URL,
 			Type: &vsphereType,
 		},
 	}
 }
 
-func getSecret(creds VCenterCreds) *core.Secret {
+func getSecret(creds Credentials) *core.Secret {
 	return &core.Secret{
 		ObjectMeta: meta.ObjectMeta{
 			Name: "vsphere-secret",
@@ -560,8 +558,3 @@ type VMResult struct {
 type VMValidation struct {
 	Result []VMResult `json:"result"`
 }
-
-type InventoryData struct {
-	Inventory apiplanner.Inventory `json:"inventory"`
-	Error     string               `json:"error"`
-}
diff --git a/internal/agent/inventory.go b/internal/agent/inventory.go
index dd28b1b..b9aefb4 100644
--- a/internal/agent/inventory.go
+++ b/internal/agent/inventory.go
@@ -28,8 +28,8 @@ type InventoryUpdater struct {
 }
 
 type InventoryData struct {
-	Inventory api.Inventory
-	Error     string
+	Inventory api.Inventory `json:"inventory"`
+	Error     string        `json:"error"`
 }
 
 func NewInventoryUpdater(log *log.PrefixLogger, config *Config, client client.Planner) *InventoryUpdater {
diff --git a/internal/image/ova.go b/internal/image/ova.go
index 519febe..e55b76b 100644
--- a/internal/image/ova.go
+++ b/internal/image/ova.go
@@ -29,11 +29,10 @@ type Ova struct {
 
 // IgnitionData defines modifiable fields in ignition config
 type IgnitionData struct {
-	SourceId                       string
-	SshKey                         string
-	PlannerService                 string
-	MigrationPlannerCollectorImage string
-	MigrationPlannerAgentImage     string
+	SourceId                   string
+	SshKey                     string
+	PlannerService             string
+	MigrationPlannerAgentImage string
 }
 
 type Image interface {
@@ -123,11 +122,10 @@ func writeOvf(tw *tar.Writer) error {
 
 func (o *Ova) generateIgnition() (string, error) {
 	ignData := IgnitionData{
-		SourceId:                       o.Id.String(),
-		SshKey:                         o.SshKey,
-		PlannerService:                 util.GetEnv("CONFIG_SERVER", "http://127.0.0.1:7443"),
-		MigrationPlannerCollectorImage: util.GetEnv("MIGRATION_PLANNER_COLLECTOR_IMAGE", "quay.io/kubev2v/migration-planner-collector"),
-		MigrationPlannerAgentImage:     util.GetEnv("MIGRATION_PLANNER_AGENT_IMAGE", "quay.io/kubev2v/migration-planner-agent"),
+		SourceId:                   o.Id.String(),
+		SshKey:                     o.SshKey,
+		PlannerService:             util.GetEnv("CONFIG_SERVER", "http://127.0.0.1:7443"),
+		MigrationPlannerAgentImage: util.GetEnv("MIGRATION_PLANNER_AGENT_IMAGE", "quay.io/kubev2v/migration-planner-agent"),
 	}
 
 	var buf bytes.Buffer
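
For reference, a minimal standalone sketch (not part of the patch) of the collector flow this change moves into the agent: block until `credentials.json` appears in the data directory, read it, and write an `inventory.json` next to it, mirroring the `NewCollector(a.log, a.config.DataDir)` / `go collector.collect()` wiring in the agent.go hunk above. The real implementation uses the forklift vSphere collector and OPA validation; the literal file names here stand in for the `CredentialsFile` and `InventoryFile` constants assumed by the patch.

```go
// Illustrative sketch only; the real code lives in internal/agent/collector.go.
package main

import (
	"encoding/json"
	"log"
	"os"
	"path/filepath"
	"time"
)

// Credentials mirrors the struct the patched collector unmarshals from credentials.json.
type Credentials struct {
	URL      string `json:"url"`
	Username string `json:"username"`
	Password string `json:"password"`
}

// waitForFile polls until the given path exists, like the agent's waitForFile helper.
func waitForFile(path string) {
	for {
		if _, err := os.Stat(path); err == nil {
			return
		}
		time.Sleep(time.Second)
	}
}

// collect waits for credentials in dataDir and writes a placeholder inventory file.
func collect(dataDir string) error {
	credsPath := filepath.Join(dataDir, "credentials.json")
	waitForFile(credsPath)

	raw, err := os.ReadFile(credsPath)
	if err != nil {
		return err
	}
	var creds Credentials
	if err := json.Unmarshal(raw, &creds); err != nil {
		return err
	}

	// The real collector gathers VMs, hosts, and clusters from vCenter and
	// validates them against OPA; here we only emit a stub inventory document.
	inventory := map[string]any{"inventory": map[string]any{"vcenter": creds.URL}}
	out, err := json.MarshalIndent(inventory, "", "  ")
	if err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dataDir, "inventory.json"), out, 0o644)
}

func main() {
	dataDir := "/tmp/data" // matches the data-dir used in COLLECTOR.md
	// Run in the background, as the agent does with `go collector.collect()`.
	done := make(chan error, 1)
	go func() { done <- collect(dataDir) }()
	if err := <-done; err != nil {
		log.Fatal(err)
	}
}
```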