From fdb4d84763b8b28f3dc94b1a2a0b0f60aae2eb55 Mon Sep 17 00:00:00 2001 From: George Hicken Date: Wed, 19 Apr 2023 13:41:39 +0000 Subject: [PATCH 1/8] vcsim: untangle container/VM to allow reuse of container logic for hosts Refactors the container logic out of the simulator VM file so it can be used for both VM and host container backings. The following file structure is now in place: * container.go - wraps docker operation execs * container_virtual_machine.go - orchestration of containers for VMs * container_host_system.go - orchestration of containers for Hosts * container_xxx_test.go - test for container backed VMs/Hosts Add CGO_ENABLED=1 to test command with -race --- Makefile | 2 +- simulator/container.go | 703 +++++++++++-------------- simulator/container_virtual_machine.go | 497 +++++++++++++++++ simulator/feature_test.go | 20 +- simulator/guest_operations_manager.go | 28 +- simulator/model.go | 4 +- simulator/virtual_machine.go | 29 +- 7 files changed, 849 insertions(+), 434 deletions(-) create mode 100644 simulator/container_virtual_machine.go diff --git a/Makefile b/Makefile index bd287802a..0f31649af 100644 --- a/Makefile +++ b/Makefile @@ -136,7 +136,7 @@ endif .PHONY: go-test go-test: ## Runs go unit tests with race detector enabled - GORACE=$(GORACE) $(GO) test \ + GORACE=$(GORACE) CGO_ENABLED=1 $(GO) test \ -count $(TEST_COUNT) \ -race \ -timeout $(TEST_TIMEOUT) \ diff --git a/simulator/container.go b/simulator/container.go index fec1c0f48..1fd57a4d2 100644 --- a/simulator/container.go +++ b/simulator/container.go @@ -19,24 +19,17 @@ package simulator import ( "archive/tar" "bytes" - "encoding/hex" "encoding/json" + "errors" "fmt" "io" "log" - "net/http" "os" "os/exec" "path" "regexp" - "strconv" "strings" "time" - - "github.com/google/uuid" - - "github.com/vmware/govmomi/vim25/methods" - "github.com/vmware/govmomi/vim25/types" ) var ( @@ -62,115 +55,117 @@ type networkSettings struct { MacAddress string } -// inspect applies container network settings to vm.Guest properties. 
-func (c *container) inspect(vm *VirtualMachine) error { - if c.id == "" { - return nil +type containerDetails struct { + State struct { + Running bool + Paused bool } + NetworkSettings struct { + networkSettings + Networks map[string]networkSettings + } +} - var objects []struct { - State struct { - Running bool - Paused bool - } - NetworkSettings struct { - networkSettings - Networks map[string]networkSettings - } +type unknownContainer error +type uninitializedContainer error + +var sanitizeNameRx = regexp.MustCompile(`[\(\)\s]`) + +func sanitizeName(name string) string { + return sanitizeNameRx.ReplaceAllString(name, "-") +} + +func constructContainerName(name, uid string) string { + return fmt.Sprintf("vcsim-%s-%s", sanitizeName(name), uid) +} + +func constructVolumeName(containerName, uid, volumeName string) string { + return constructContainerName(containerName, uid) + "--" + sanitizeName(volumeName) +} + +func extractNameAndUid(containerName string) (name string, uid string, err error) { + parts := strings.Split(strings.TrimPrefix(containerName, "vcsim-"), "-") + if len(parts) != 2 { + err = fmt.Errorf("container name does not match expected vcsim-name-uid format: %s", containerName) + return } - cmd := exec.Command("docker", "inspect", c.id) - out, err := cmd.Output() + return parts[0], parts[1], nil +} + +type tarEntry struct { + header *tar.Header + content []byte +} + +// From https://docs.docker.com/engine/reference/commandline/cp/ : +// > It is not possible to copy certain system files such as resources under /proc, /sys, /dev, tmpfs, and mounts created by the user in the container. +// > However, you can still copy such files by manually running tar in docker exec. +// TODO: look at whether this can useful combine with populateVolume for the tar portion or whether the duplication is low enough to make sense +func copyToGuest(id string, dest string, length int64, reader io.Reader) error { + cmd := exec.Command("docker", "exec", "-i", id, "tar", "Cxf", path.Dir(dest), "-") + cmd.Stderr = os.Stderr + stdin, err := cmd.StdinPipe() if err != nil { return err } - if err = json.NewDecoder(bytes.NewReader(out)).Decode(&objects); err != nil { + + err = cmd.Start() + if err != nil { return err } - vm.Config.Annotation = strings.Join(cmd.Args, " ") - vm.logPrintf("%s: %s", vm.Config.Annotation, string(out)) - - for _, o := range objects { - s := o.NetworkSettings.networkSettings - - for _, n := range o.NetworkSettings.Networks { - s = n - break - } + tw := tar.NewWriter(stdin) + _ = tw.WriteHeader(&tar.Header{ + Name: path.Base(dest), + Size: length, + Mode: 0444, + ModTime: time.Now(), + }) - if o.State.Paused { - vm.Runtime.PowerState = types.VirtualMachinePowerStateSuspended - } else if o.State.Running { - vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOn - } else { - vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff - } + _, err = io.Copy(tw, reader) - vm.Guest.IpAddress = s.IPAddress - vm.Summary.Guest.IpAddress = s.IPAddress - - if len(vm.Guest.Net) != 0 { - net := &vm.Guest.Net[0] - net.IpAddress = []string{s.IPAddress} - net.MacAddress = s.MacAddress - net.IpConfig = &types.NetIpConfigInfo{ - IpAddress: []types.NetIpConfigInfoIpAddress{{ - IpAddress: s.IPAddress, - PrefixLength: int32(s.IPPrefixLen), - State: string(types.NetIpConfigInfoIpAddressStatusPreferred), - }}, - } - } + errin := tw.Close() + errout := stdin.Close() - for _, d := range vm.Config.Hardware.Device { - if eth, ok := d.(types.BaseVirtualEthernetCard); ok { - 
eth.GetVirtualEthernetCard().MacAddress = s.MacAddress - break - } - } - } + errwait := cmd.Wait() - return nil + return errors.Join(err, errout, errin, errwait) } -func (c *container) prepareGuestOperation( - vm *VirtualMachine, - auth types.BaseGuestAuthentication) types.BaseMethodFault { - - if c.id == "" { - return new(types.GuestOperationsUnavailable) +func copyFromGuest(id string, src string, sink func(int64, io.Reader) error) error { + cmd := exec.Command("docker", "exec", id, "tar", "Ccf", path.Dir(src), "-", path.Base(src)) + cmd.Stderr = os.Stderr + stdout, err := cmd.StdoutPipe() + if err != nil { + return err } - if vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn { - return &types.InvalidPowerState{ - RequestedState: types.VirtualMachinePowerStatePoweredOn, - ExistingState: vm.Runtime.PowerState, - } + if err = cmd.Start(); err != nil { + return err } - switch creds := auth.(type) { - case *types.NamePasswordAuthentication: - if creds.Username == "" || creds.Password == "" { - return new(types.InvalidGuestLogin) - } - default: - return new(types.InvalidGuestLogin) + + tr := tar.NewReader(stdout) + header, err := tr.Next() + if err != nil { + return err } - return nil -} -var sanitizeNameRx = regexp.MustCompile(`[\(\)\s]`) + err = sink(header.Size, tr) + errwait := cmd.Wait() -func sanitizeName(name string) string { - return sanitizeNameRx.ReplaceAllString(name, "-") + return errors.Join(err, errwait) } -// createDMI writes BIOS UUID DMI files to a container volume -func (c *container) createDMI(vm *VirtualMachine, name string) error { +// populateVolume creates a volume tightly associated with the specified container, populated with the provided files +// If the header.Size is omitted or set to zero, then len(content+1) is used. +func populateVolume(containerName string, volumeName string, files []tarEntry) error { image := os.Getenv("VCSIM_BUSYBOX") if image == "" { image = "busybox" } + // TODO: do we need to cap name lengths so as not to overflow? 
+ name := sanitizeName(containerName) + "--" + sanitizeName(volumeName) cmd := exec.Command("docker", "run", "--rm", "-i", "-v", name+":"+"/"+name, image, "tar", "-C", "/"+name, "-xf", "-") stdin, err := cmd.StdinPipe() if err != nil { @@ -184,429 +179,325 @@ func (c *container) createDMI(vm *VirtualMachine, name string) error { tw := tar.NewWriter(stdin) - dmi := []struct { - name string - val func(uuid.UUID) string - }{ - {"product_uuid", productUUID}, - {"product_serial", productSerial}, - } + for _, file := range files { + header := file.header + + if header.Size == 0 && len(file.content) > 0 { + header.Size = int64(len(file.content)) + } - for _, file := range dmi { - val := file.val(vm.uid) - _ = tw.WriteHeader(&tar.Header{ - Name: file.name, - Size: int64(len(val) + 1), - Mode: 0444, - ModTime: time.Now(), - }) - _, _ = fmt.Fprintln(tw, val) + if header.ModTime.IsZero() { + header.ModTime = time.Now() + } + + if header.Mode == 0 { + header.Mode = 0444 + } + + tarErr := tw.WriteHeader(header) + if tarErr == nil { + _, tarErr = tw.Write(file.content) + } } - _ = tw.Close() - _ = stdin.Close() + err1 := tw.Close() + err2 := stdin.Close() + err = errors.Join(err1, err2) - if err := cmd.Wait(); err != nil { + if err3 := cmd.Wait(); err3 != nil { stderr := "" if xerr, ok := err.(*exec.ExitError); ok { stderr = string(xerr.Stderr) } - log.Printf("%s %s: %s %s", vm.Name, cmd.Args, err, stderr) - return err - } - - return nil -} + log.Printf("%s %s: %s %s", name, cmd.Args, err, stderr) -var ( - toolsRunning = []types.PropertyChange{ - {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsOk}, - {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsRunning)}, + return errors.Join(err, err3) } - toolsNotRunning = []types.PropertyChange{ - {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsNotRunning}, - {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsNotRunning)}, - } -) + return err +} -// start runs the container if specified by the RUN.container extraConfig property. -func (c *container) start(ctx *Context, vm *VirtualMachine) { - if c.id != "" { - start := "start" - if vm.Runtime.PowerState == types.VirtualMachinePowerStateSuspended { - start = "unpause" - } - cmd := exec.Command("docker", start, c.id) - err := cmd.Run() - if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsRunning) - } - return +// create +// - name - pretty name, eg. vm name +// - id - uuid or similar - this is merged into container name rather than dictating containerID +// - networks - set of bridges to connect the container to +// - volumes - colon separated tuple of volume name to mount path. Passed directly to docker via -v so mount options can be postfixed. 
+// - env - array of environment vairables in name=value form +// - image - the name of the container image to use, including tag +// - args - the command+args to pass to the container +func create(ctx *Context, name string, id string, networks []string, volumes []string, ports []string, env []string, image string, args []string) (*container, error) { + if len(image) == 0 { + return nil, errors.New("cannot create container backing without an image") } - var args []string - var env []string - mountDMI := true - ports := make(map[string]string) + var c container + c.name = constructContainerName(name, id) - for _, opt := range vm.Config.ExtraConfig { - val := opt.GetOptionValue() - if val.Key == "RUN.container" { - run := val.Value.(string) - err := json.Unmarshal([]byte(run), &args) - if err != nil { - args = []string{run} - } + // assemble env + var dockerNet []string + var dockerVol []string + var dockerPort []string + var dockerEnv []string - continue - } - if val.Key == "RUN.mountdmi" { - var mount bool - err := json.Unmarshal([]byte(val.Value.(string)), &mount) - if err == nil { - mountDMI = mount - } - } - if strings.HasPrefix(val.Key, "RUN.port.") { - sKey := strings.Split(val.Key, ".") - containerPort := sKey[len(sKey)-1] - ports[containerPort] = val.Value.(string) - } - if strings.HasPrefix(val.Key, "RUN.env.") { - sKey := strings.Split(val.Key, ".") - envKey := sKey[len(sKey)-1] - env = append(env, "--env", fmt.Sprintf("%s=%s", envKey, val.Value.(string))) - } - if strings.HasPrefix(val.Key, "guestinfo.") { - key := strings.Replace(strings.ToUpper(val.Key), ".", "_", -1) - env = append(env, "--env", fmt.Sprintf("VMX_%s=%s", key, val.Value.(string))) - } + for i := range env { + dockerEnv = append(dockerEnv, "--env", env[i]) } - if len(args) == 0 { - return - } - if len(env) != 0 { - // Configure env as the data access method for cloud-init-vmware-guestinfo - env = append(env, "--env", "VMX_GUESTINFO=true") - } - if len(ports) != 0 { - // Publish the specified container ports - for containerPort, hostPort := range ports { - env = append(env, "-p", fmt.Sprintf("%s:%s", hostPort, containerPort)) - } + for i := range volumes { + dockerVol = append(dockerVol, "-v", volumes[i]) } - c.name = fmt.Sprintf("vcsim-%s-%s", sanitizeName(vm.Name), vm.uid) - run := append([]string{"docker", "run", "-d", "--name", c.name}, env...) + for i := range ports { + dockerPort = append(dockerPort, "-p", ports[i]) + } - if mountDMI { - if err := c.createDMI(vm, c.name); err != nil { - return - } - run = append(run, "-v", fmt.Sprintf("%s:%s:ro", c.name, "/sys/class/dmi/id")) + for i := range networks { + dockerNet = append(dockerNet, "--network", networks[i]) } - args = append(run, args...) - cmd := exec.Command(shell, "-c", strings.Join(args, " ")) + run := []string{"docker", "create", "--name", c.name} + run = append(run, dockerNet...) + run = append(run, dockerVol...) + run = append(run, dockerPort...) + run = append(run, dockerEnv...) + run = append(run, image) + run = append(run, args...) + + // this combines all the run options into a single string that's passed to /bin/bash -c as the single argument to force bash parsing. 
+ // TODO: make this configurable behaviour so users also have the option of not escaping everything for bash + cmd := exec.Command(shell, "-c", strings.Join(run, " ")) out, err := cmd.Output() if err != nil { stderr := "" if xerr, ok := err.(*exec.ExitError); ok { stderr = string(xerr.Stderr) } - log.Printf("%s %s: %s %s", vm.Name, cmd.Args, err, stderr) - return + log.Printf("%s %s: %s %s", name, cmd.Args, err, stderr) + + return nil, err } - ctx.Map.Update(vm, toolsRunning) c.id = strings.TrimSpace(string(out)) - vm.logPrintf("%s %s: %s", cmd.Path, cmd.Args, c.id) - - if err = c.inspect(vm); err != nil { - log.Printf("%s inspect %s: %s", vm.Name, c.id, err) - } - // Start watching the container resource. - go c.watchContainer(vm) + return &c, nil } -// watchContainer monitors the underlying container and updates the VM -// properties based on the container status. This occurs until either -// the container or the VM is removed. -func (c *container) watchContainer(vm *VirtualMachine) { - - inspectInterval := time.Duration(5 * time.Second) - if d, err := time.ParseDuration(os.Getenv("VCSIM_INSPECT_INTERVAL")); err == nil { - inspectInterval = d - } - - var ( - ctx = SpoofContext() - done = make(chan struct{}) - ticker = time.NewTicker(inspectInterval) - ) +// populateVolume takes the specified files and writes them into a volume named for the container. +func (c *container) populateVolume(name string, files []tarEntry) error { + return populateVolume(c.name, name, files) +} - stopUpdatingVmFromContainer := func() { - ticker.Stop() - close(done) +// inspect retrieves and parses container properties into directly usable struct +// returns: +// +// out - the stdout of the command +// detail - basic struct populated with container details +// err: +// * if c.id is empty, or docker returns "No such object", will return an uninitializedContainer error +// * err from either execution or parsing of json output +func (c *container) inspect() (out []byte, detail containerDetails, err error) { + if c.id == "" { + err = uninitializedContainer(errors.New("inspect of uninitialized container")) + return } - destroyVm := func() { - // If the container cannot be found then destroy this VM. - taskRef := vm.DestroyTask(ctx, &types.Destroy_Task{ - This: vm.Self, - }).(*methods.Destroy_TaskBody).Res.Returnval - task := ctx.Map.Get(taskRef).(*Task) + var details []containerDetails - // Wait for the task to complete and see if there is an error. - task.Wait() - if task.Info.Error != nil { - vm.logPrintf("failed to destroy vm: err=%v", *task.Info.Error) + cmd := exec.Command("docker", "inspect", c.id) + out, err = cmd.Output() + if eErr, ok := err.(*exec.ExitError); ok { + if strings.Contains(string(eErr.Stderr), "No such object") { + err = uninitializedContainer(errors.New("inspect of uninitialized container")) } } - updateVmFromContainer := func() { - // Exit the monitor loop if the VM was removed from the API side. - if c.id == "" { - stopUpdatingVmFromContainer() - return - } + if err != nil { + return + } - if err := c.inspect(vm); err != nil { - // If there is an error inspecting the container because it no - // longer exists, then destroy the VM as well. Please note the - // reason this logic does not invoke stopUpdatingVmFromContainer - // is because that will be handled the next time this function - // is entered and c.id is empty. 
- if err, ok := err.(*exec.ExitError); ok { - if strings.Contains(string(err.Stderr), "No such object") { - destroyVm() - } - } - } + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&details); err != nil { + return } - // Update the VM from the container at regular intervals until the done - // channel is closed. - for { - select { - case <-ticker.C: - ctx.WithLock(vm, updateVmFromContainer) - case <-done: - return - } + if len(details) != 1 { + err = fmt.Errorf("multiple containers (%d) match ID: %s", len(details), c.id) + return } + + detail = details[0] + return } -// stop the container (if any) for the given vm. -func (c *container) stop(ctx *Context, vm *VirtualMachine) { +// start +// - if the container already exists, start it or unpause it. +func (c *container) start(ctx *Context) error { if c.id == "" { - return + return uninitializedContainer(errors.New("start of uninitialized container")) } - cmd := exec.Command("docker", "stop", c.id) - err := cmd.Run() + start := "start" + _, detail, err := c.inspect() if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsNotRunning) + return err } + + if detail.State.Paused { + start = "unpause" + } + + cmd := exec.Command("docker", start, c.id) + err = cmd.Run() + if err != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, err) + } + + return err } // pause the container (if any) for the given vm. -func (c *container) pause(ctx *Context, vm *VirtualMachine) { +func (c *container) pause(ctx *Context) error { if c.id == "" { - return + return uninitializedContainer(errors.New("pause of uninitialized container")) } cmd := exec.Command("docker", "pause", c.id) err := cmd.Run() if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsNotRunning) + log.Printf("%s %s: %s", c.name, cmd.Args, err) } + + return err } // restart the container (if any) for the given vm. -func (c *container) restart(ctx *Context, vm *VirtualMachine) { +func (c *container) restart(ctx *Context) error { if c.id == "" { - return + return uninitializedContainer(errors.New("restart of uninitialized container")) } cmd := exec.Command("docker", "restart", c.id) err := cmd.Run() if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsRunning) + log.Printf("%s %s: %s", c.name, cmd.Args, err) } + + return err } -// remove the container (if any) for the given vm. -func (c *container) remove(vm *VirtualMachine) { +// stop the container (if any) for the given vm. +func (c *container) stop(ctx *Context) error { if c.id == "" { - return + return uninitializedContainer(errors.New("stop of uninitialized container")) } - args := [][]string{ - {"rm", "-v", "-f", c.id}, - {"volume", "rm", "-f", c.name}, - } - - for i := range args { - cmd := exec.Command("docker", args[i]...) 
- err := cmd.Run() - if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } + cmd := exec.Command("docker", "stop", c.id) + err := cmd.Run() + if err != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, err) } - c.id = "" + return err } -func (c *container) exec(ctx *Context, vm *VirtualMachine, auth types.BaseGuestAuthentication, args []string) (string, types.BaseMethodFault) { - fault := vm.run.prepareGuestOperation(vm, auth) - if fault != nil { - return "", fault +// exec invokes the specified command, with executable being the first of the args, in the specified container +// returns +// +// string - combined stdout and stderr from command +// err +// * uninitializedContainer error - if c.id is empty +// * err from cmd execution +func (c *container) exec(ctx *Context, args []string) (string, error) { + if c.id == "" { + return "", uninitializedContainer(errors.New("exec into uninitialized container")) } - args = append([]string{"exec", vm.run.id}, args...) + args = append([]string{"exec", c.id}, args...) cmd := exec.Command("docker", args...) - res, err := cmd.CombinedOutput() if err != nil { - log.Printf("%s: %s (%s)", vm.Self, cmd.Args, string(res)) - return "", new(types.GuestOperationsFault) + log.Printf("%s: %s (%s)", c.name, cmd.Args, string(res)) + return "", err } return strings.TrimSpace(string(res)), nil } -// From https://docs.docker.com/engine/reference/commandline/cp/ : -// > It is not possible to copy certain system files such as resources under /proc, /sys, /dev, tmpfs, and mounts created by the user in the container. -// > However, you can still copy such files by manually running tar in docker exec. -func guestUpload(id string, file string, r *http.Request) error { - cmd := exec.Command("docker", "exec", "-i", id, "tar", "Cxf", path.Dir(file), "-") - cmd.Stderr = os.Stderr - stdin, err := cmd.StdinPipe() - if err != nil { - return err - } - if err = cmd.Start(); err != nil { - return err +// remove the container (if any) for the given vm. Considers removal of an uninitialized container success. 
+// returns: +// +// err - joined err from deletion of container and matching volume name +func (c *container) remove(ctx *Context) error { + if c.id == "" { + // consider absence success + return nil } - tw := tar.NewWriter(stdin) - _ = tw.WriteHeader(&tar.Header{ - Name: path.Base(file), - Size: r.ContentLength, - Mode: 0444, - ModTime: time.Now(), - }) - - _, _ = io.Copy(tw, r.Body) - - _ = tw.Close() - _ = stdin.Close() - _ = r.Body.Close() - - return cmd.Wait() -} - -func guestDownload(id string, file string, w http.ResponseWriter) error { - cmd := exec.Command("docker", "exec", id, "tar", "Ccf", path.Dir(file), "-", path.Base(file)) - cmd.Stderr = os.Stderr - stdout, err := cmd.StdoutPipe() + cmd := exec.Command("docker", "rm", "-v", "-f", c.id) + err := cmd.Run() if err != nil { - return err - } - if err = cmd.Start(); err != nil { - return err + log.Printf("%s %s: %s", c.name, cmd.Args, err) } - tr := tar.NewReader(stdout) - header, err := tr.Next() - if err != nil { - return err + // TODO: modify this to list all volumes with c.name prefix and delete them - necessary because populateVolume was generalized + cmd = exec.Command("docker", "volume", "rm", "-f", c.name) + err2 := cmd.Run() + if err2 != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, err2) } - w.Header().Set("Content-Length", strconv.FormatInt(header.Size, 10)) - _, _ = io.Copy(w, tr) - - return cmd.Wait() -} + combinedErr := errors.Join(err, err2) -const guestPrefix = "/guestFile/" - -// ServeGuest handles container guest file upload/download -func ServeGuest(w http.ResponseWriter, r *http.Request) { - // Real vCenter form: /guestFile?id=139&token=... - // vcsim form: /guestFile/tmp/foo/bar?id=ebc8837b8cb6&token=... + if combinedErr == nil { + c.id = "" + } - id := r.URL.Query().Get("id") - file := strings.TrimPrefix(r.URL.Path, guestPrefix[:len(guestPrefix)-1]) - var err error + return combinedErr +} - switch r.Method { - case http.MethodPut: - err = guestUpload(id, file, r) - case http.MethodGet: - err = guestDownload(id, file, w) - default: - w.WriteHeader(http.StatusMethodNotAllowed) - return +// watchContainer monitors the underlying container and updates +// properties based on the container status. This occurs until either +// the container or the VM is removed. +// returns: +// +// err - uninitializedContainer error - if c.id is empty +func (c *container) watchContainer(ctx *Context, updateFn func(*Context, *containerDetails, *container) error) error { + if c.id == "" { + return uninitializedContainer(errors.New("Attempt to watch uninitialized container")) } - if err != nil { - log.Printf("%s %s: %s", r.Method, r.URL, err) - w.WriteHeader(http.StatusInternalServerError) - } -} + // Update the VM from the container at regular intervals until the done + // channel is closed. 
+ go func() { + inspectInterval := time.Duration(5 * time.Second) + if d, err := time.ParseDuration(os.Getenv("VCSIM_INSPECT_INTERVAL")); err == nil { + inspectInterval = d + } + ticker := time.NewTicker(inspectInterval) + + for { + select { + case <-ticker.C: + _, details, err := c.inspect() + var rmErr error + var removing bool + if _, ok := err.(uninitializedContainer); ok { + removing = true + rmErr = c.remove(ctx) + } -// productSerial returns the uuid in /sys/class/dmi/id/product_serial format -func productSerial(id uuid.UUID) string { - var dst [len(id)*2 + len(id) - 1]byte - - j := 0 - for i := 0; i < len(id); i++ { - hex.Encode(dst[j:j+2], id[i:i+1]) - j += 3 - if j < len(dst) { - s := j - 1 - if s == len(dst)/2 { - dst[s] = '-' - } else { - dst[s] = ' ' + updateErr := updateFn(ctx, &details, c) + err = errors.Join(rmErr, updateErr) + if removing && err == nil { + // if we don't succeed we want to re-try + ticker.Stop() + return + } + // TODO: log err? + case <-ctx.Done(): + return } } - } + }() - return fmt.Sprintf("VMware-%s", string(dst[:])) -} - -// productUUID returns the uuid in /sys/class/dmi/id/product_uuid format -func productUUID(id uuid.UUID) string { - var dst [36]byte - - hex.Encode(dst[0:2], id[3:4]) - hex.Encode(dst[2:4], id[2:3]) - hex.Encode(dst[4:6], id[1:2]) - hex.Encode(dst[6:8], id[0:1]) - dst[8] = '-' - hex.Encode(dst[9:11], id[5:6]) - hex.Encode(dst[11:13], id[4:5]) - dst[13] = '-' - hex.Encode(dst[14:16], id[7:8]) - hex.Encode(dst[16:18], id[6:7]) - dst[18] = '-' - hex.Encode(dst[19:23], id[8:10]) - dst[23] = '-' - hex.Encode(dst[24:], id[10:]) - - return strings.ToUpper(string(dst[:])) + return nil } diff --git a/simulator/container_virtual_machine.go b/simulator/container_virtual_machine.go new file mode 100644 index 000000000..895dd4d8a --- /dev/null +++ b/simulator/container_virtual_machine.go @@ -0,0 +1,497 @@ +/* +Copyright (c) 2018 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "archive/tar" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "strconv" + "strings" + + "github.com/google/uuid" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +var ( + toolsRunning = []types.PropertyChange{ + {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsOk}, + {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsRunning)}, + } + + toolsNotRunning = []types.PropertyChange{ + {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsNotRunning}, + {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsNotRunning)}, + } +) + +type simVM struct { + vm *VirtualMachine + c *container +} + +// applies container network settings to vm.Guest properties. 
+func (svm *simVM) syncNetworkConfigToVMGuestProperties() error { + if svm == nil { + return nil + } + + out, detail, err := svm.c.inspect() + if err != nil { + return err + } + + svm.vm.Config.Annotation = "inspect" + svm.vm.logPrintf("%s: %s", svm.vm.Config.Annotation, string(out)) + + netS := detail.NetworkSettings.networkSettings + + // ? Why is this valid - we're taking the first entry while iterating over a MAP + for _, n := range detail.NetworkSettings.Networks { + netS = n + break + } + + if detail.State.Paused { + svm.vm.Runtime.PowerState = types.VirtualMachinePowerStateSuspended + } else if detail.State.Running { + svm.vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOn + } else { + svm.vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff + } + + svm.vm.Guest.IpAddress = netS.IPAddress + svm.vm.Summary.Guest.IpAddress = netS.IPAddress + + if len(svm.vm.Guest.Net) != 0 { + net := &svm.vm.Guest.Net[0] + net.IpAddress = []string{netS.IPAddress} + net.MacAddress = netS.MacAddress + net.IpConfig = &types.NetIpConfigInfo{ + IpAddress: []types.NetIpConfigInfoIpAddress{{ + IpAddress: netS.IPAddress, + PrefixLength: int32(netS.IPPrefixLen), + State: string(types.NetIpConfigInfoIpAddressStatusPreferred), + }}, + } + } + + for _, d := range svm.vm.Config.Hardware.Device { + if eth, ok := d.(types.BaseVirtualEthernetCard); ok { + eth.GetVirtualEthernetCard().MacAddress = netS.MacAddress + break + } + } + + return nil +} + +func (svm *simVM) prepareGuestOperation(auth types.BaseGuestAuthentication) types.BaseMethodFault { + if svm != nil && (svm.c == nil || svm.c.id == "") { + return new(types.GuestOperationsUnavailable) + } + + if svm.vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn { + return &types.InvalidPowerState{ + RequestedState: types.VirtualMachinePowerStatePoweredOn, + ExistingState: svm.vm.Runtime.PowerState, + } + } + + switch creds := auth.(type) { + case *types.NamePasswordAuthentication: + if creds.Username == "" || creds.Password == "" { + return new(types.InvalidGuestLogin) + } + default: + return new(types.InvalidGuestLogin) + } + + return nil +} + +// createDMI writes BIOS UUID DMI files to a container volume +func (svm *simVM) createDMI() error { + if svm.c == nil { + return nil + } + + files := []tarEntry{ + { + &tar.Header{ + Name: "product_uuid", + Mode: 0444, + }, + []byte(productUUID(svm.vm.uid)), + }, + { + &tar.Header{ + Name: "product_serial", + Mode: 0444, + }, + []byte(productSerial(svm.vm.uid)), + }, + } + + return svm.c.populateVolume("dmi", files) +} + +// createSimulationVM inspects the provided VirtualMachine and creates a simulationVM binding for it if +// the vm.Config.ExtraConfig set contains a key "RUN.container". +// If the ExtraConfig set does not contain that key, this returns nil. +// Methods on the simVM type are written to check for nil object so the return from this call can be blindly +// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. +func createSimulationVM(vm *VirtualMachine) *simVM { + svm := &simVM{ + vm: vm, + } + + for _, opt := range vm.Config.ExtraConfig { + val := opt.GetOptionValue() + if val.Key == "RUN.container" { + return svm + } + } + + return nil +} + +// start runs the container if specified by the RUN.container extraConfig property. 
+// lazily creates a container backing if specified by an ExtraConfig property with key "RUN.container" +func (svm *simVM) start(ctx *Context) error { + if svm == nil { + return nil + } + + if svm.c != nil && svm.c.id != "" { + err := svm.c.start(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "start", err) + } else { + ctx.Map.Update(svm.vm, toolsRunning) + } + + return err + } + + var args []string + var env []string + var ports []string + mountDMI := true + + for _, opt := range svm.vm.Config.ExtraConfig { + val := opt.GetOptionValue() + if val.Key == "RUN.container" { + run := val.Value.(string) + err := json.Unmarshal([]byte(run), &args) + if err != nil { + args = []string{run} + } + + continue + } + + if val.Key == "RUN.mountdmi" { + var mount bool + err := json.Unmarshal([]byte(val.Value.(string)), &mount) + if err == nil { + mountDMI = mount + } + + continue + } + + if strings.HasPrefix(val.Key, "RUN.port.") { + // ? would this not make more sense as a set of tuples in the value? + // or inlined into the RUN.container freeform string as is the case with the nginx volume in the examples? + sKey := strings.Split(val.Key, ".") + containerPort := sKey[len(sKey)-1] + ports = append(ports, fmt.Sprintf("%s:%s", val.Value.(string), containerPort)) + + continue + } + + if strings.HasPrefix(val.Key, "RUN.env.") { + sKey := strings.Split(val.Key, ".") + envKey := sKey[len(sKey)-1] + env = append(env, fmt.Sprintf("%s=%s", envKey, val.Value.(string))) + } + + if strings.HasPrefix(val.Key, "guestinfo.") { + key := strings.Replace(strings.ToUpper(val.Key), ".", "_", -1) + env = append(env, fmt.Sprintf("VMX_%s=%s", key, val.Value.(string))) + + continue + } + } + + if len(args) == 0 { + // not an error - it's simply a simVM that shouldn't be backed by a container + return nil + } + + if len(env) != 0 { + // Configure env as the data access method for cloud-init-vmware-guestinfo + env = append(env, "VMX_GUESTINFO=true") + } + + volumes := []string{} + if mountDMI { + volumes = append(volumes, constructVolumeName(svm.vm.Name, svm.vm.uid.String(), "dmi")+":/sys/class/dmi/id") + } + + var err error + svm.c, err = create(ctx, svm.vm.Name, svm.vm.uid.String(), nil, volumes, ports, env, args[0], args[1:]) + if err != nil { + return err + } + + if mountDMI { + // not combined with the test assembling volumes because we want to have the container name + // set so the volume can be named based on that. + // TODO: rework volume creation to use labels and consider ditching the reliance on names for association + err = svm.createDMI() + if err != nil { + return err + } + } + + err = svm.c.start(ctx) + if err != nil { + log.Printf("%s %s: %s %s", svm.vm.Name, "start", args, err) + return err + } + + ctx.Map.Update(svm.vm, toolsRunning) + + svm.vm.logPrintf("%s: %s", args, svm.c.id) + + if err = svm.syncNetworkConfigToVMGuestProperties(); err != nil { + log.Printf("%s inspect %s: %s", svm.vm.Name, svm.c.id, err) + } + + callback := func(ctx *Context, details *containerDetails, c *container) error { + spoofctx := SpoofContext() + + if c.id == "" { + // If the container cannot be found then destroy this VM. + // TODO: figure out if we should pass the vm/container via ctx or otherwise from the callback - this might cause locking issues. + taskRef := svm.vm.DestroyTask(spoofctx, &types.Destroy_Task{This: svm.vm.Self}).(*methods.Destroy_TaskBody).Res.Returnval + task := ctx.Map.Get(taskRef).(*Task) + + // Wait for the task to complete and see if there is an error. 
+ task.Wait() + if task.Info.Error != nil { + msg := fmt.Sprintf("failed to destroy vm: err=%v", *task.Info.Error) + svm.vm.logPrintf(msg) + + return errors.New(msg) + } + } + + return svm.syncNetworkConfigToVMGuestProperties() + } + + // Start watching the container resource. + err = svm.c.watchContainer(ctx, callback) + if _, ok := err.(uninitializedContainer); ok { + // the container has been deleted before we could watch, despite successful launch so clean up. + callback(ctx, nil, svm.c) + + // successful launch so nil the error + return nil + } + + return err +} + +// stop the container (if any) for the given vm. +func (svm *simVM) stop(ctx *Context) error { + if svm != nil { + err := svm.c.stop(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "stop", err) + + return err + } + } + + ctx.Map.Update(svm.vm, toolsNotRunning) + + return nil +} + +// pause the container (if any) for the given vm. +func (svm *simVM) pause(ctx *Context) error { + if svm != nil { + err := svm.c.pause(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "pause", err) + + return err + } + } + + ctx.Map.Update(svm.vm, toolsNotRunning) + + return nil +} + +// restart the container (if any) for the given vm. +func (svm *simVM) restart(ctx *Context) error { + if svm != nil { + err := svm.c.restart(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "restart", err) + + return err + } + } + + ctx.Map.Update(svm.vm, toolsRunning) + + return nil +} + +// remove the container (if any) for the given vm. +func (svm *simVM) remove(ctx *Context) error { + if svm != nil { + err := svm.c.remove(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "remove", err) + + return err + } + } + + return nil +} + +func (svm *simVM) exec(ctx *Context, auth types.BaseGuestAuthentication, args []string) (string, types.BaseMethodFault) { + if svm == nil { + return "", nil + } + + fault := svm.prepareGuestOperation(auth) + if fault != nil { + return "", fault + } + + out, err := svm.c.exec(ctx, args) + if err != nil { + log.Printf("%s: %s (%s)", svm.vm.Name, args, string(out)) + return "", new(types.GuestOperationsFault) + } + + return strings.TrimSpace(string(out)), nil +} + +func guestUpload(id string, file string, r *http.Request) error { + // TODO: decide behaviour for no container + err := copyToGuest(id, file, r.ContentLength, r.Body) + _ = r.Body.Close() + return err +} + +func guestDownload(id string, file string, w http.ResponseWriter) error { + // TODO: decide behaviour for no container + sink := func(len int64, r io.Reader) error { + w.Header().Set("Content-Length", strconv.FormatInt(len, 10)) + _, err := io.Copy(w, r) + return err + } + + err := copyFromGuest(id, file, sink) + return err +} + +const guestPrefix = "/guestFile/" + +// ServeGuest handles container guest file upload/download +func ServeGuest(w http.ResponseWriter, r *http.Request) { + // Real vCenter form: /guestFile?id=139&token=... + // vcsim form: /guestFile/tmp/foo/bar?id=ebc8837b8cb6&token=... 
+ + id := r.URL.Query().Get("id") + file := strings.TrimPrefix(r.URL.Path, guestPrefix[:len(guestPrefix)-1]) + var err error + + switch r.Method { + case http.MethodPut: + err = guestUpload(id, file, r) + case http.MethodGet: + err = guestDownload(id, file, w) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + if err != nil { + log.Printf("%s %s: %s", r.Method, r.URL, err) + w.WriteHeader(http.StatusInternalServerError) + } +} + +// productSerial returns the uuid in /sys/class/dmi/id/product_serial format +func productSerial(id uuid.UUID) string { + var dst [len(id)*2 + len(id) - 1]byte + + j := 0 + for i := 0; i < len(id); i++ { + hex.Encode(dst[j:j+2], id[i:i+1]) + j += 3 + if j < len(dst) { + s := j - 1 + if s == len(dst)/2 { + dst[s] = '-' + } else { + dst[s] = ' ' + } + } + } + + return fmt.Sprintf("VMware-%s", string(dst[:])) +} + +// productUUID returns the uuid in /sys/class/dmi/id/product_uuid format +func productUUID(id uuid.UUID) string { + var dst [36]byte + + hex.Encode(dst[0:2], id[3:4]) + hex.Encode(dst[2:4], id[2:3]) + hex.Encode(dst[4:6], id[1:2]) + hex.Encode(dst[6:8], id[0:1]) + dst[8] = '-' + hex.Encode(dst[9:11], id[5:6]) + hex.Encode(dst[11:13], id[4:5]) + dst[13] = '-' + hex.Encode(dst[14:16], id[7:8]) + hex.Encode(dst[16:18], id[6:7]) + dst[18] = '-' + hex.Encode(dst[19:23], id[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], id[10:]) + + return strings.ToUpper(string(dst[:])) +} diff --git a/simulator/feature_test.go b/simulator/feature_test.go index 92fb76b78..be5bd532e 100644 --- a/simulator/feature_test.go +++ b/simulator/feature_test.go @@ -26,6 +26,7 @@ import ( "os" "os/exec" "path/filepath" + "strings" "github.com/vmware/govmomi" "github.com/vmware/govmomi/find" @@ -121,6 +122,7 @@ func Example_runContainer() { }, ExtraConfig: []types.BaseOptionValue{ &types.OptionValue{Key: "RUN.container", Value: args}, // run nginx + &types.OptionValue{Key: "RUN.port.80", Value: "8888"}, // test port remap }, } @@ -144,13 +146,25 @@ func Example_runContainer() { ip, _ := vm.WaitForIP(ctx, true) // Returns the docker container's IP - // Count the number of bytes in feature_test.go via nginx + // Count the number of bytes in feature_test.go via nginx going direct to the container cmd := exec.Command("docker", "run", "--rm", "curlimages/curl", "curl", "-f", fmt.Sprintf("http://%s", ip)) var buf bytes.Buffer cmd.Stdout = &buf err = cmd.Run() - if err != nil { - log.Fatal(err) + res := buf.String() + // TODO: look at switching to assert/require instead of raw tests + if err != nil || strings.TrimSpace(res) != fcontent { + log.Fatal(err, buf.String()) + } + + // Count the number of bytes in feature_test.go via nginx going via port remap on host + cmd = exec.Command("docker", "run", "--rm", "--network=host", "curlimages/curl", "curl", "-f", fmt.Sprintf("http://%s", ip)) + buf.Reset() + cmd.Stdout = &buf + err = cmd.Run() + res = buf.String() + if err != nil || strings.TrimSpace(res) != fcontent { + log.Fatal(err, buf.String()) } // PowerOff stops the container diff --git a/simulator/guest_operations_manager.go b/simulator/guest_operations_manager.go index 780f44a04..f05883580 100644 --- a/simulator/guest_operations_manager.go +++ b/simulator/guest_operations_manager.go @@ -69,7 +69,7 @@ func guestURL(ctx *Context, vm *VirtualMachine, path string) string { Host: "*", // See guest.FileManager.TransferURL Path: guestPrefix + strings.TrimPrefix(path, "/"), RawQuery: url.Values{ - "id": []string{vm.run.id}, + "id": []string{vm.svm.c.id}, "token": 
[]string{ctx.Session.Key}, }.Encode(), }).String() @@ -79,7 +79,7 @@ func (m *GuestFileManager) InitiateFileTransferToGuest(ctx *Context, req *types. body := new(methods.InitiateFileTransferToGuestBody) vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - err := vm.run.prepareGuestOperation(vm, req.Auth) + err := vm.svm.prepareGuestOperation(req.Auth) if err != nil { body.Fault_ = Fault("", err) return body @@ -96,7 +96,7 @@ func (m *GuestFileManager) InitiateFileTransferFromGuest(ctx *Context, req *type body := new(methods.InitiateFileTransferFromGuestBody) vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - err := vm.run.prepareGuestOperation(vm, req.Auth) + err := vm.svm.prepareGuestOperation(req.Auth) if err != nil { body.Fault_ = Fault("", err) return body @@ -126,7 +126,7 @@ func (m *GuestProcessManager) StartProgramInGuest(ctx *Context, req *types.Start vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - fault := vm.run.prepareGuestOperation(vm, auth) + fault := vm.svm.prepareGuestOperation(auth) if fault != nil { body.Fault_ = Fault("", fault) } @@ -141,7 +141,7 @@ func (m *GuestProcessManager) StartProgramInGuest(ctx *Context, req *types.Start args = append(args, "-e", e) } - args = append(args, vm.run.id, spec.ProgramPath, spec.Arguments) + args = append(args, vm.svm.c.id, spec.ProgramPath, spec.Arguments) spec.ProgramPath = "docker" spec.Arguments = strings.Join(args, " ") @@ -213,7 +213,7 @@ func (m *GuestFileManager) mktemp(ctx *Context, req *types.CreateTemporaryFileIn vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - return vm.run.exec(ctx, vm, req.Auth, args) + return vm.svm.exec(ctx, req.Auth, args) } func (m *GuestFileManager) CreateTemporaryFileInGuest(ctx *Context, req *types.CreateTemporaryFileInGuest) soap.HasFault { @@ -298,7 +298,7 @@ func (m *GuestFileManager) ListFilesInGuest(ctx *Context, req *types.ListFilesIn return body } - res, fault := vm.run.exec(ctx, vm, req.Auth, listFiles(req)) + res, fault := vm.svm.exec(ctx, req.Auth, listFiles(req)) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -317,7 +317,7 @@ func (m *GuestFileManager) DeleteFileInGuest(ctx *Context, req *types.DeleteFile vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -338,7 +338,7 @@ func (m *GuestFileManager) DeleteDirectoryInGuest(ctx *Context, req *types.Delet vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -359,7 +359,7 @@ func (m *GuestFileManager) MakeDirectoryInGuest(ctx *Context, req *types.MakeDir vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -381,7 +381,7 @@ func (m *GuestFileManager) MoveFileInGuest(ctx *Context, req *types.MoveFileInGu vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -399,7 +399,7 @@ func (m *GuestFileManager) MoveDirectoryInGuest(ctx *Context, req *types.MoveDir vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) 
return body @@ -424,7 +424,7 @@ func (m *GuestFileManager) ChangeFileAttributesInGuest(ctx *Context, req *types. if attr.Permissions != 0 { args := []string{"chmod", fmt.Sprintf("%#o", attr.Permissions), req.GuestFilePath} - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -443,7 +443,7 @@ func (m *GuestFileManager) ChangeFileAttributesInGuest(ctx *Context, req *types. if c.id != nil { args := []string{c.cmd, fmt.Sprintf("%d", *c.id), req.GuestFilePath} - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body diff --git a/simulator/model.go b/simulator/model.go index b3da2900c..99d7625e1 100644 --- a/simulator/model.go +++ b/simulator/model.go @@ -492,7 +492,7 @@ func (m *Model) Create() error { // 1 NIC per VM, backed by a DVPG if Model.Portgroup > 0 vmnet := esx.EthernetCard.Backing - // addHost adds a cluster host or a stanalone host. + // addHost adds a cluster host or a standalone host. addHost := func(name string, f func(types.HostConnectSpec) (*object.Task, error)) (*object.HostSystem, error) { spec := types.HostConnectSpec{ HostName: name, @@ -855,7 +855,7 @@ func (m *Model) Remove() { Map.m.Lock() for _, obj := range Map.objects { if vm, ok := obj.(*VirtualMachine); ok { - vm.run.remove(vm) + vm.svm.remove(SpoofContext()) } } Map.m.Unlock() diff --git a/simulator/virtual_machine.go b/simulator/virtual_machine.go index 524b6e24c..dd755ded6 100644 --- a/simulator/virtual_machine.go +++ b/simulator/virtual_machine.go @@ -46,7 +46,7 @@ type VirtualMachine struct { log string sid int32 - run container + svm *simVM uid uuid.UUID imc *types.CustomizationSpec } @@ -453,6 +453,10 @@ func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec) if len(changes) != 0 { Map.Update(vm, changes) } + + if vm.svm == nil { + vm.svm = createSimulationVM(vm) + } } func validateGuestID(id string) types.BaseMethodFault { @@ -1610,14 +1614,23 @@ func (c *powerVMTask) Run(task *Task) (types.AnyType, types.BaseMethodFault) { return nil, new(types.InvalidState) } - c.run.start(c.ctx, c.VirtualMachine) + err := c.svm.start(c.ctx) + if err != nil { + return nil, &types.MissingPowerOnConfiguration{ + VAppConfigFault: types.VAppConfigFault{ + VimFault: types.VimFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}}}} + } c.ctx.postEvent( &types.VmStartingEvent{VmEvent: event}, &types.VmPoweredOnEvent{VmEvent: event}, ) c.customize(c.ctx) case types.VirtualMachinePowerStatePoweredOff: - c.run.stop(c.ctx, c.VirtualMachine) + c.svm.stop(c.ctx) c.ctx.postEvent( &types.VmStoppingEvent{VmEvent: event}, &types.VmPoweredOffEvent{VmEvent: event}, @@ -1630,7 +1643,7 @@ func (c *powerVMTask) Run(task *Task) (types.AnyType, types.BaseMethodFault) { } } - c.run.pause(c.ctx, c.VirtualMachine) + c.svm.pause(c.ctx) c.ctx.postEvent( &types.VmSuspendingEvent{VmEvent: event}, &types.VmSuspendedEvent{VmEvent: event}, @@ -1737,7 +1750,7 @@ func (vm *VirtualMachine) RebootGuest(ctx *Context, req *types.RebootGuest) soap } if vm.Guest.ToolsRunningStatus == string(types.VirtualMachineToolsRunningStatusGuestToolsRunning) { - vm.run.restart(ctx, vm) + vm.svm.restart(ctx) body.Res = new(types.RebootGuestResponse) } else { body.Fault_ = Fault("", new(types.ToolsUnavailable)) @@ -1825,7 +1838,7 
@@ func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soa Datacenter: &dc.Self, }) - vm.run.remove(vm) + vm.svm.remove(ctx) return nil, nil }) @@ -2321,7 +2334,7 @@ func (vm *VirtualMachine) ShutdownGuest(ctx *Context, c *types.ShutdownGuest) so ctx.postEvent(&types.VmGuestShutdownEvent{VmEvent: event}) _ = CreateTask(vm, "shutdownGuest", func(*Task) (types.AnyType, types.BaseMethodFault) { - vm.run.stop(ctx, vm) + vm.svm.stop(ctx) ctx.Map.Update(vm, []types.PropertyChange{ {Name: "runtime.powerState", Val: types.VirtualMachinePowerStatePoweredOff}, @@ -2354,7 +2367,7 @@ func (vm *VirtualMachine) StandbyGuest(ctx *Context, c *types.StandbyGuest) soap ctx.postEvent(&types.VmGuestStandbyEvent{VmEvent: event}) _ = CreateTask(vm, "standbyGuest", func(*Task) (types.AnyType, types.BaseMethodFault) { - vm.run.pause(ctx, vm) + vm.svm.pause(ctx) ctx.Map.Update(vm, []types.PropertyChange{ {Name: "runtime.powerState", Val: types.VirtualMachinePowerStateSuspended}, From 387dc6e56079d0468e035d0de65429c7982ee38b Mon Sep 17 00:00:00 2001 From: George Hicken Date: Sat, 22 Apr 2023 06:35:51 +0000 Subject: [PATCH 2/8] vcsim: support container backing for hosts This adds support for backing a host with a container in a similar manner to how we back VMs with containers. Hosts do not have the VM ExtraConfig mechanism, and "creation" of a host is more "register the existance of" vs VMs that are actively constructed from the provided spec. As such, this uses the advanced options mechanism provided by the per-host Option Manager instead of ExtraConfig, but following the same "RUN.container" key/value approach for defining a container backing. The created container for a host has the following volumes defined: * bootbank (read-only) * altbootbank (read-only) * OS-DATA (read-write) * datastore1 (read-write) The volumes have suitably formed UUIDs, are mounted under /vmfs/volumes, and have symlinked pretty names. The volumes are associated with the host via labels, allowing the use of filtered queries to retrieve volumes associated with a given host. All docker invocation is kept in container.go and out of the container_xxx.go files. Not clear this is a fundamental benefit, but should make it easier if we ever want to support remote docker hosts. 
--- simulator/cluster_compute_resource.go | 2 +- simulator/container.go | 104 +++++-- simulator/container_host_system.go | 265 ++++++++++++++++++ simulator/container_host_system_test.go | 48 ++++ simulator/container_virtual_machine.go | 113 ++++---- simulator/esx/host_config_filesystemvolume.go | 152 ++++++++++ simulator/esx/host_config_info.go | 1 + simulator/feature_test.go | 3 +- simulator/host_system.go | 29 +- simulator/virtual_machine.go | 10 +- 10 files changed, 651 insertions(+), 76 deletions(-) create mode 100644 simulator/container_host_system.go create mode 100644 simulator/container_host_system_test.go create mode 100644 simulator/esx/host_config_filesystemvolume.go diff --git a/simulator/cluster_compute_resource.go b/simulator/cluster_compute_resource.go index f86fa7ed4..abe001665 100644 --- a/simulator/cluster_compute_resource.go +++ b/simulator/cluster_compute_resource.go @@ -66,7 +66,7 @@ func (add *addHost) Run(task *Task) (types.AnyType, types.BaseMethodFault) { } host := NewHostSystem(template) - host.configure(spec, add.req.AsConnected) + host.configure(task.ctx, spec, add.req.AsConnected) task.ctx.Map.PutEntity(cr, task.ctx.Map.NewEntity(host)) host.Summary.Host = &host.Self diff --git a/simulator/container.go b/simulator/container.go index 1fd57a4d2..33ebe5ca4 100644 --- a/simulator/container.go +++ b/simulator/container.go @@ -36,6 +36,10 @@ var ( shell = "/bin/sh" ) +const ( + deleteWithContainer = "lifecycle=container" +) + func init() { if sh, err := exec.LookPath("bash"); err != nil { shell = sh @@ -156,25 +160,52 @@ func copyFromGuest(id string, src string, sink func(int64, io.Reader) error) err return errors.Join(err, errwait) } -// populateVolume creates a volume tightly associated with the specified container, populated with the provided files +// createVolume creates a volume populated with the provided files // If the header.Size is omitted or set to zero, then len(content+1) is used. -func populateVolume(containerName string, volumeName string, files []tarEntry) error { +// Docker appears to treat this volume create command as idempotent so long as it's identical +// to an existing volume, so we can use this both for creating volumes inline in container create (for labelling) and +// for population after. +// returns: +// +// uid - string +// err - error or nil +func createVolume(volumeName string, labels []string, files []tarEntry) (string, error) { image := os.Getenv("VCSIM_BUSYBOX") if image == "" { image = "busybox" } // TODO: do we need to cap name lengths so as not to overflow? - name := sanitizeName(containerName) + "--" + sanitizeName(volumeName) - cmd := exec.Command("docker", "run", "--rm", "-i", "-v", name+":"+"/"+name, image, "tar", "-C", "/"+name, "-xf", "-") + name := sanitizeName(volumeName) + uid := "" + + // label the volume if specified - this requires the volume be created before use + if len(labels) > 0 { + run := []string{"volume", "create"} + for i := range labels { + run = append(run, "--label", labels[i]) + } + run = append(run, name) + cmd := exec.Command("docker", run...) + out, err := cmd.Output() + if err != nil { + return "", err + } + uid = strings.TrimSpace(string(out)) + } + + run := []string{"run", "--rm", "-i"} + run = append(run, "-v", name+":/"+name) + run = append(run, image, "tar", "-C", "/"+name, "-xf", "-") + cmd := exec.Command("docker", run...) 
stdin, err := cmd.StdinPipe() if err != nil { - return err + return uid, err } err = cmd.Start() if err != nil { - return err + return uid, err } tw := tar.NewWriter(stdin) @@ -211,10 +242,10 @@ func populateVolume(containerName string, volumeName string, files []tarEntry) e } log.Printf("%s %s: %s %s", name, cmd.Args, err, stderr) - return errors.Join(err, err3) + return uid, errors.Join(err, err3) } - return err + return uid, err } // create @@ -223,7 +254,7 @@ func populateVolume(containerName string, volumeName string, files []tarEntry) e // - networks - set of bridges to connect the container to // - volumes - colon separated tuple of volume name to mount path. Passed directly to docker via -v so mount options can be postfixed. // - env - array of environment vairables in name=value form -// - image - the name of the container image to use, including tag +// - optsAndImage - pass-though options and must include at least the container image to use, including tag if necessary // - args - the command+args to pass to the container func create(ctx *Context, name string, id string, networks []string, volumes []string, ports []string, env []string, image string, args []string) (*container, error) { if len(image) == 0 { @@ -233,6 +264,12 @@ func create(ctx *Context, name string, id string, networks []string, volumes []s var c container c.name = constructContainerName(name, id) + for i := range volumes { + // we'll pre-create anonymous volumes, simply for labelling consistency + volName := strings.Split(volumes[i], ":") + createVolume(volName[0], []string{deleteWithContainer, "container=" + c.name}, nil) + } + // assemble env var dockerNet []string var dockerVol []string @@ -282,9 +319,9 @@ func create(ctx *Context, name string, id string, networks []string, volumes []s return &c, nil } -// populateVolume takes the specified files and writes them into a volume named for the container. -func (c *container) populateVolume(name string, files []tarEntry) error { - return populateVolume(c.name, name, files) +// createVolume takes the specified files and writes them into a volume named for the container. +func (c *container) createVolume(name string, labels []string, files []tarEntry) (string, error) { + return createVolume(c.name+"--"+name, append(labels, "container="+c.name), files) } // inspect retrieves and parses container properties into directly usable struct @@ -423,9 +460,10 @@ func (c *container) exec(ctx *Context, args []string) (string, error) { } // remove the container (if any) for the given vm. Considers removal of an uninitialized container success. +// Also removes volumes and networks that indicate they are lifecycle coupled with this container. 
// returns: // -// err - joined err from deletion of container and matching volume name +// err - joined err from deletion of container and any volumes or networks that have coupled lifecycle func (c *container) remove(ctx *Context) error { if c.id == "" { // consider absence success @@ -436,16 +474,44 @@ func (c *container) remove(ctx *Context) error { err := cmd.Run() if err != nil { log.Printf("%s %s: %s", c.name, cmd.Args, err) + return err } - // TODO: modify this to list all volumes with c.name prefix and delete them - necessary because populateVolume was generalized - cmd = exec.Command("docker", "volume", "rm", "-f", c.name) - err2 := cmd.Run() - if err2 != nil { - log.Printf("%s %s: %s", c.name, cmd.Args, err2) + cmd = exec.Command("docker", "volume", "ls", "-q", "--filter", "label=container="+c.name, "--filter", "label="+deleteWithContainer) + volumesToReap, lsverr := cmd.Output() + if lsverr != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, lsverr) + } + + var rmverr error + if len(volumesToReap) > 0 { + run := []string{"volume", "rm", "-f"} + run = append(run, strings.Split(string(volumesToReap), "\n")...) + cmd = exec.Command("docker", run...) + rmverr = cmd.Run() + if rmverr != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, rmverr) + } + } + + cmd = exec.Command("docker", "network", "ls", "-q", "--filter", "label=container="+c.name, "--filter", "label="+deleteWithContainer) + networksToReap, lsnerr := cmd.Output() + if lsnerr != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, lsnerr) + } + + var rmnerr error + if len(networksToReap) > 0 { + run := []string{"network", "rm", "-f"} + run = append(run, strings.Split(string(volumesToReap), "\n")...) + cmd = exec.Command("docker", run...) + rmnerr = cmd.Run() + if rmnerr != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, rmnerr) + } } - combinedErr := errors.Join(err, err2) + combinedErr := errors.Join(err, lsverr, rmverr, lsnerr, rmnerr) if combinedErr == nil { c.id = "" diff --git a/simulator/container_host_system.go b/simulator/container_host_system.go new file mode 100644 index 000000000..7fe90966e --- /dev/null +++ b/simulator/container_host_system.go @@ -0,0 +1,265 @@ +/* +Copyright (c) 2017 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "fmt" + "strings" + + "github.com/vmware/govmomi/vim25/types" +) + +const KiB = 1024 +const MiB = 1024 * KiB +const GiB = 1024 * MiB +const TiB = 1024 * GiB +const Pib = 1024 * TiB + +const KB = 1000 +const MB = 1000 * KB +const GB = 1000 * MB +const TB = 1000 * GB +const PB = 1000 * TB + +type simHost struct { + host *HostSystem + c *container +} + +// createSimulationHost inspects the provided HostSystem and creates a simHost binding for it if +// the vm.Config.ExtraConfig set contains a key "RUN.container". +// If the ExtraConfig set does not contain that key, this returns nil. 
+// Methods on the simHost type are written to check for nil object so the return from this call can be blindly +// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. +func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { + sh := &simHost{ + host: host, + } + + simulated := false + for _, opt := range host.HostSystem.Config.Option { + val := opt.GetOptionValue() + if val.Key == "RUN.container" { + simulated = true + break + } + } + + if !simulated { + return nil, nil + } + + // assemble env + var dockerEnv []string + var dockerVol []string + var dockerNet []string + var symlinkCmds [][]string + + var err error + // purge template filesystem info - this is either going to be replaced with user supplied info from HostConnectSpec.DynamicData or the + // default sim setup + // TODO: move this into DynamicData in the HostConnectSpec? Perhaps provide a method to populate the dyndata with a "default" sim config + if host.Config.FileSystemVolume == nil { + host.Config.FileSystemVolume = &types.HostFileSystemVolumeInfo{ + VolumeTypeList: []string{"VMFS", "OTHER"}, + } + } + if host.Config.FileSystemVolume.MountInfo == nil { + host.Config.FileSystemVolume.MountInfo = defaultSimVolumes + } + + // TODO: handle the case that neither name nor GUID are specified for the host + containerName := constructContainerName("esx"+host.Summary.Config.Name, host.Summary.Host.ServerGUID) + + for i := range host.Config.FileSystemVolume.MountInfo { + info := &host.Config.FileSystemVolume.MountInfo[i] + name := info.Volume.GetHostFileSystemVolume().Name + + // NOTE: if we ever need persistence cross-invocation we can look at encoding the disk info as a label + labels := []string{"name=" + name, "container=" + containerName, deleteWithContainer} + dockerUuid, err := createVolume("", labels, nil) + if err != nil { + return nil, err + } + + uuid := volumeIDtoHostVolumeUUID(dockerUuid) + name = strings.Replace(name, uuidToken, uuid, -1) + + switch vol := info.Volume.(type) { + case *types.HostVmfsVolume: + vol.BlockSizeMb = 1 + vol.BlockSize = KiB + vol.UnmapGranularity = KiB + vol.UnmapPriority = "low" + vol.MajorVersion = 6 + vol.Version = "6.82" + vol.Uuid = uuid + vol.HostFileSystemVolume.Name = name + for e := range vol.Extent { + vol.Extent[e].DiskName = "____simulated_volume_____" + if vol.Extent[e].Partition == 0 { + // HACK: this should be unique within the diskname, but for now this will suffice + // partitions start at 1 + vol.Extent[e].Partition = int32(e + 1) + } + } + vol.Ssd = types.NewBool(true) + vol.Local = types.NewBool(true) + case *types.HostVfatVolume: + vol.HostFileSystemVolume.Name = name + } + + info.VStorageSupport = "vStorageUnsupported" + + info.MountInfo.Path = "/vmfs/volumes/" + uuid + info.MountInfo.Mounted = types.NewBool(true) + info.MountInfo.Accessible = types.NewBool(true) + if info.MountInfo.AccessMode == "" { + info.MountInfo.AccessMode = "readWrite" + } + + opt := "rw" + if info.MountInfo.AccessMode == "readOnly" { + opt = "ro" + } + + dockerVol = append(dockerVol, fmt.Sprintf("%s:/vmfs/volumes/%s:%s", dockerUuid, uuid, opt)) + symlinkCmds = append(symlinkCmds, []string{"ln", "-s", fmt.Sprintf("/vmfs/volumes/%s", uuid), fmt.Sprintf("/vmfs/volumes/%s", name)}) + if strings.HasPrefix(name, "OSDATA") { + symlinkCmds = append(symlinkCmds, []string{"mkdir", "-p", "/var/lib/vmware"}) + symlinkCmds = append(symlinkCmds, []string{"ln", "-s", fmt.Sprintf("/vmfs/volumes/%s", uuid), "/var/lib/vmware/osdata"}) + } 
+ } + + // TODO: extract the underlay's from a topology config + // create a bridge for each broadcast domain a pnic is connected to + dockerNet = append(dockerNet, defaultUnderlayBridgeName) + + // TODO: add in vSwitches if we know them at this point + + // - a pnic does not have an IP so this is purely a connectivity statement, not a network identity + // ? how is this underlay topology expressed? Initially we can assume a flat topology with all hosts on the same broadcast domain + + // if there's a DVS that doesn't have a bridge, create the bridge + + sh.c, err = create(ctx, "esx"+host.Summary.Config.Name, host.Summary.Host.ServerGUID, dockerNet, dockerVol, nil, dockerEnv, "alpine", []string{"sleep", "infinity"}) + if err != nil { + return nil, err + } + + err = sh.c.start(ctx) + if err != nil { + return nil, err + } + + // create symlinks from /vmfs/volumes/ for the Volume Name - the direct mount (path) is only the uuid + // ? can we do this via a script in the ESX image? are the volume names exposed in any manner instead the host? They must be because these mounts exist + // but where does that come from? Chicken and egg problem? ConfigStore? + for _, symlink := range symlinkCmds { + _, err := sh.c.exec(ctx, symlink) + if err != nil { + return nil, err + } + } + + return sh, nil +} + +// remove destroys the container associated with the host and any volumes with labels specifying their lifecycle +// is coupled with the container +func (sh *simHost) remove(ctx *Context) error { + return sh.c.remove(ctx) +} + +// volumeIDtoHostVolumeUUID takes the 64 char docker uuid and converts it into a 32char ESX form of 8-8-4-12 +// Perhaps we should do this using an md5 rehash, but instead we just take the first 32char for ease of cross-reference. +func volumeIDtoHostVolumeUUID(id string) string { + return fmt.Sprintf("%s-%s-%s-%s", id[0:8], id[8:16], id[16:20], id[20:32]) +} + +// By reference to physical system, partition numbering tends to work out like this: +// 1. EFI System (100 MB) +// Free space (1.97 MB) +// 5. Basic Data (4 GB) (bootbank1) +// 6. Basic Data (4 GB) (bootbank2) +// 7. VMFSL (119.9 GB) (os-data) +// 8. 
VMFS (1 TB) (datastore1) +// I assume the jump from 1 -> 5 harks back to the primary/logical partitions from MBT days +const uuidToken = "%__UUID__%" + +var defaultSimVolumes = []types.HostFileSystemMountInfo{ + { + MountInfo: types.HostMountInfo{ + AccessMode: "readWrite", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "VMFS", + Name: "datastore1", + Capacity: 1 * TiB, + }, + Extent: []types.HostScsiDiskPartition{ + { + Partition: 8, + }, + }, + }, + }, + { + MountInfo: types.HostMountInfo{ + AccessMode: "readWrite", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "OSDATA-%__UUID__%", + Capacity: 128 * GiB, + }, + Extent: []types.HostScsiDiskPartition{ + { + Partition: 7, + }, + }, + }, + }, + { + MountInfo: types.HostMountInfo{ + AccessMode: "readOnly", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK1", + Capacity: 4 * GiB, + }, + }, + }, + { + MountInfo: types.HostMountInfo{ + AccessMode: "readOnly", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK2", + Capacity: 4 * GiB, + }, + }, + }, +} + +const defaultUnderlayBridgeName = "vcsim-underlay" diff --git a/simulator/container_host_system_test.go b/simulator/container_host_system_test.go new file mode 100644 index 000000000..5995165f8 --- /dev/null +++ b/simulator/container_host_system_test.go @@ -0,0 +1,48 @@ +/* +Copyright (c) 2019 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "testing" + + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/types" +) + +func TestSimHost(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + if err != nil { + t.Fatal(err) + } + + hs := NewHostSystem(esx.HostSystem) + if hs.Summary.Runtime != &hs.Runtime { + t.Fatal("expected hs.Summary.Runtime == &hs.Runtime; got !=") + } + + hs.HostSystem.Config.Option = append(hs.HostSystem.Config.Option, &types.OptionValue{Key: "RUN.container"}) + hs.HostSystem.Config.FileSystemVolume = nil + + hs.configure(SpoofContext(), types.HostConnectSpec{}, true) + + hs.sh.remove(SpoofContext()) + +} diff --git a/simulator/container_virtual_machine.go b/simulator/container_virtual_machine.go index 895dd4d8a..cc67d063f 100644 --- a/simulator/container_virtual_machine.go +++ b/simulator/container_virtual_machine.go @@ -51,6 +51,26 @@ type simVM struct { c *container } +// createSimulationVM inspects the provided VirtualMachine and creates a simVM binding for it if +// the vm.Config.ExtraConfig set contains a key "RUN.container". +// If the ExtraConfig set does not contain that key, this returns nil. +// Methods on the simVM type are written to check for nil object so the return from this call can be blindly +// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. 
+func createSimulationVM(vm *VirtualMachine) *simVM { + svm := &simVM{ + vm: vm, + } + + for _, opt := range vm.Config.ExtraConfig { + val := opt.GetOptionValue() + if val.Key == "RUN.container" { + return svm + } + } + + return nil +} + // applies container network settings to vm.Guest properties. func (svm *simVM) syncNetworkConfigToVMGuestProperties() error { if svm == nil { @@ -131,8 +151,8 @@ func (svm *simVM) prepareGuestOperation(auth types.BaseGuestAuthentication) type return nil } -// createDMI writes BIOS UUID DMI files to a container volume -func (svm *simVM) createDMI() error { +// populateDMI writes BIOS UUID DMI files to a container volume +func (svm *simVM) populateDMI() error { if svm.c == nil { return nil } @@ -154,27 +174,8 @@ func (svm *simVM) createDMI() error { }, } - return svm.c.populateVolume("dmi", files) -} - -// createSimulationVM inspects the provided VirtualMachine and creates a simulationVM binding for it if -// the vm.Config.ExtraConfig set contains a key "RUN.container". -// If the ExtraConfig set does not contain that key, this returns nil. -// Methods on the simVM type are written to check for nil object so the return from this call can be blindly -// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. -func createSimulationVM(vm *VirtualMachine) *simVM { - svm := &simVM{ - vm: vm, - } - - for _, opt := range vm.Config.ExtraConfig { - val := opt.GetOptionValue() - if val.Key == "RUN.container" { - return svm - } - } - - return nil + _, err := svm.c.createVolume("dmi", []string{deleteWithContainer}, files) + return err } // start runs the container if specified by the RUN.container extraConfig property. @@ -268,10 +269,10 @@ func (svm *simVM) start(ctx *Context) error { } if mountDMI { - // not combined with the test assembling volumes because we want to have the container name - // set so the volume can be named based on that. - // TODO: rework volume creation to use labels and consider ditching the reliance on names for association - err = svm.createDMI() + // not combined with the test assembling volumes because we want to have the container name first. + // cannot add a label to a volume after creation, so if we want to associate with the container ID the + // container must come first + err = svm.populateDMI() if err != nil { return err } @@ -328,13 +329,15 @@ func (svm *simVM) start(ctx *Context) error { // stop the container (if any) for the given vm. func (svm *simVM) stop(ctx *Context) error { - if svm != nil { - err := svm.c.stop(ctx) - if err != nil { - log.Printf("%s %s: %s", svm.vm.Name, "stop", err) + if svm == nil || svm.c == nil { + return nil + } - return err - } + err := svm.c.stop(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "stop", err) + + return err } ctx.Map.Update(svm.vm, toolsNotRunning) @@ -344,13 +347,15 @@ func (svm *simVM) stop(ctx *Context) error { // pause the container (if any) for the given vm. func (svm *simVM) pause(ctx *Context) error { - if svm != nil { - err := svm.c.pause(ctx) - if err != nil { - log.Printf("%s %s: %s", svm.vm.Name, "pause", err) + if svm == nil || svm.c == nil { + return nil + } - return err - } + err := svm.c.pause(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "pause", err) + + return err } ctx.Map.Update(svm.vm, toolsNotRunning) @@ -360,13 +365,15 @@ func (svm *simVM) pause(ctx *Context) error { // restart the container (if any) for the given vm. 
func (svm *simVM) restart(ctx *Context) error { - if svm != nil { - err := svm.c.restart(ctx) - if err != nil { - log.Printf("%s %s: %s", svm.vm.Name, "restart", err) + if svm == nil || svm.c == nil { + return nil + } - return err - } + err := svm.c.restart(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "restart", err) + + return err } ctx.Map.Update(svm.vm, toolsRunning) @@ -376,20 +383,22 @@ func (svm *simVM) restart(ctx *Context) error { // remove the container (if any) for the given vm. func (svm *simVM) remove(ctx *Context) error { - if svm != nil { - err := svm.c.remove(ctx) - if err != nil { - log.Printf("%s %s: %s", svm.vm.Name, "remove", err) + if svm == nil || svm.c == nil { + return nil + } - return err - } + err := svm.c.remove(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "remove", err) + + return err } return nil } func (svm *simVM) exec(ctx *Context, auth types.BaseGuestAuthentication, args []string) (string, types.BaseMethodFault) { - if svm == nil { + if svm == nil || svm.c == nil { return "", nil } diff --git a/simulator/esx/host_config_filesystemvolume.go b/simulator/esx/host_config_filesystemvolume.go new file mode 100644 index 000000000..ac841e05a --- /dev/null +++ b/simulator/esx/host_config_filesystemvolume.go @@ -0,0 +1,152 @@ +/* +Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package esx + +import "github.com/vmware/govmomi/vim25/types" + +const KiB = 1024 +const MiB = 1024 * KiB +const GiB = 1024 * MiB +const TiB = 1024 * GiB +const Pib = 1024 * TiB + +const KB = 1000 +const MB = 1000 * KB +const GB = 1000 * MB +const TB = 1000 * GB +const PB = 1000 * TB + +// HostConfigInfo is the default template for the HostSystem config property. 
+// Capture method: +// govc object.collect -s -dump HostSystem:ha-host config.fileSystemVolume +var HostFileSystemVolumeInfo = types.HostFileSystemVolumeInfo{ + VolumeTypeList: []string{"VMFS", "NFS", "NFS41", "vsan", "VVOL", "VFFS", "OTHER", "PMEM"}, + MountInfo: []types.HostFileSystemMountInfo{ + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000003", + AccessMode: "readWrite", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "VMFS", + Name: "datastore1", + Capacity: 3.5 * TiB, + }, + BlockSizeMb: 1, + BlockSize: KiB, + UnmapGranularity: KiB, + UnmapPriority: "low", + UnmapBandwidthSpec: (*types.VmfsUnmapBandwidthSpec)(nil), + MaxBlocks: 61 * MiB, + MajorVersion: 6, + Version: "6.82", + Uuid: "deadbeef-01234567-89ab-cdef00000003", + Extent: []types.HostScsiDiskPartition{ + { + DiskName: "____simulated_volumes_____", + Partition: 8, + }, + }, + VmfsUpgradable: false, + ForceMountedInfo: (*types.HostForceMountedInfo)(nil), + Ssd: types.NewBool(true), + Local: types.NewBool(true), + ScsiDiskType: "", + }, + VStorageSupport: "vStorageUnsupported", + }, + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000002", + AccessMode: "readWrite", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "OSDATA-deadbeef-01234567-89ab-cdef00000002", + Capacity: 128 * GiB, + }, + BlockSizeMb: 1, + BlockSize: KiB, + UnmapGranularity: 0, + UnmapPriority: "", + UnmapBandwidthSpec: (*types.VmfsUnmapBandwidthSpec)(nil), + MaxBlocks: 256 * KiB, + MajorVersion: 1, + Version: "1.00", + Uuid: "deadbeef-01234567-89ab-cdef00000002", + Extent: []types.HostScsiDiskPartition{ + { + DiskName: "____simulated_volumes_____", + Partition: 7, + }, + }, + VmfsUpgradable: false, + ForceMountedInfo: (*types.HostForceMountedInfo)(nil), + Ssd: types.NewBool(true), + Local: types.NewBool(true), + ScsiDiskType: "", + }, + VStorageSupport: "vStorageUnsupported", + }, + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000001", + AccessMode: "readOnly", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK1", + Capacity: 4 * GiB, + }, + }, + VStorageSupport: "", + }, + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000000", + AccessMode: "readOnly", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK2", + Capacity: 4 * GiB, + }, + }, + VStorageSupport: "", + }, + }, +} diff --git a/simulator/esx/host_config_info.go b/simulator/esx/host_config_info.go index ced194f95..d56c3b607 100644 --- a/simulator/esx/host_config_info.go +++ b/simulator/esx/host_config_info.go @@ -50,6 +50,7 @@ var HostConfigInfo = types.HostConfigInfo{ ConsoleReservation: (*types.ServiceConsoleReservationInfo)(nil), VirtualMachineReservation: (*types.VirtualMachineMemoryReservationInfo)(nil), 
StorageDevice: &HostStorageDeviceInfo, + FileSystemVolume: &HostFileSystemVolumeInfo, SystemFile: nil, Network: &types.HostNetworkInfo{ Vswitch: []types.HostVirtualSwitch{ diff --git a/simulator/feature_test.go b/simulator/feature_test.go index be5bd532e..8ff91b116 100644 --- a/simulator/feature_test.go +++ b/simulator/feature_test.go @@ -128,6 +128,7 @@ func Example_runContainer() { // Create a new VM task, err := f.VmFolder.CreateVM(ctx, spec, pool, nil) + if err != nil { log.Fatal(err) } @@ -152,7 +153,7 @@ func Example_runContainer() { cmd.Stdout = &buf err = cmd.Run() res := buf.String() - // TODO: look at switching to assert/require instead of raw tests + if err != nil || strings.TrimSpace(res) != fcontent { log.Fatal(err, buf.String()) } diff --git a/simulator/host_system.go b/simulator/host_system.go index f28101a8c..3bf7122f1 100644 --- a/simulator/host_system.go +++ b/simulator/host_system.go @@ -34,6 +34,8 @@ var ( type HostSystem struct { mo.HostSystem + + sh *simHost } func asHostSystemMO(obj mo.Reference) (*mo.HostSystem, bool) { @@ -72,6 +74,9 @@ func NewHostSystem(host mo.HostSystem) *HostSystem { deepCopy(hs.Config, cfg) hs.Config = cfg + simOption := types.OptionDef{ElementDescription: types.ElementDescription{Key: "RUN.container"}} + hs.Config.OptionDef = append(hs.Config.OptionDef, simOption) + config := []struct { ref **types.ManagedObjectReference obj mo.Reference @@ -92,7 +97,7 @@ func NewHostSystem(host mo.HostSystem) *HostSystem { return hs } -func (h *HostSystem) configure(spec types.HostConnectSpec, connected bool) { +func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connected bool) { h.Runtime.ConnectionState = types.HostSystemConnectionStateDisconnected if connected { h.Runtime.ConnectionState = types.HostSystemConnectionStateConnected @@ -106,6 +111,15 @@ func (h *HostSystem) configure(spec types.HostConnectSpec, connected bool) { id := newUUID(h.Name) h.Summary.Hardware.Uuid = id h.Hardware.SystemInfo.Uuid = id + + // bind to a simulation host with container backing if specified by options + // TODO: decide whether to require this at host creation or allow binding during a reconfigure + // TODO: handle the error return + var err error + h.sh, err = createSimulationHost(ctx, h) + if err != nil { + panic("failed to create simulation host and no path to return error: " + err.Error()) + } } func (h *HostSystem) event() types.HostEvent { @@ -207,7 +221,7 @@ func CreateStandaloneHost(ctx *Context, f *Folder, spec types.HostConnectSpec) ( pool := NewResourcePool() host := NewHostSystem(template) - host.configure(spec, false) + host.configure(ctx, spec, false) summary := new(types.ComputeResourceSummary) addComputeResource(summary, host) @@ -247,6 +261,17 @@ func (h *HostSystem) DestroyTask(ctx *Context, req *types.Destroy_Task) soap.Has f := ctx.Map.getEntityParent(h, "Folder").(*Folder) folderRemoveChild(ctx, &f.Folder, h.Reference()) + err := h.sh.remove(ctx) + + if err != nil { + return nil, &types.RuntimeFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}} + } + + // TODO: should there be events on lifecycle operations as with VMs? 
return nil, nil }) diff --git a/simulator/virtual_machine.go b/simulator/virtual_machine.go index dd755ded6..d65937c85 100644 --- a/simulator/virtual_machine.go +++ b/simulator/virtual_machine.go @@ -1814,6 +1814,7 @@ func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soa task := CreateTask(vm, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) { if dc == nil { return nil, &types.ManagedObjectNotFound{Obj: vm.Self} // If our Parent was destroyed, so were we. + // TODO: should this also trigger container removal? } r := vm.UnregisterVM(ctx, &types.UnregisterVM{ @@ -1838,7 +1839,14 @@ func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soa Datacenter: &dc.Self, }) - vm.svm.remove(ctx) + err := vm.svm.remove(ctx) + if err != nil { + return nil, &types.RuntimeFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}} + } return nil, nil }) From 6b7b0dfc202fa09fa653626544a540dde00efe57 Mon Sep 17 00:00:00 2001 From: George Hicken Date: Wed, 26 Apr 2023 22:38:47 +0000 Subject: [PATCH 3/8] vcsim: per-host OptionManager instances and differentiated roles Creates an OptionManager instance per-host, with valued seeded from the ESX template but not directly referencing it, ie. template changes will not reflect into existing OptionManager instances. OptionManager Query and Update methods work as expected. Changes made via OptionManager are reflected into host.Config.Options array, but it's a unidirectional reflection. This is done to match infered behaviour of ESX. There are two OptionManager instances for ESX (and I assume for VC), For ESX they are found at: * ServiceContent.setting * ConfigManager.advancedOptions The settings for ESX are empty, and the template had named the adv opts as settings. This adds an empty Setting array in the templates to clearly differentiate which set of BaseOptionValues is used to populate which OptionManager instance. Follow up required for: * VC to determine what the contents of adv opts should be. 
* whether HostConfigInfo.Options is adv opts, or combined set --- simulator/container_host_system.go | 39 ++---- simulator/container_host_system_test.go | 174 +++++++++++++++++++++++- simulator/esx/setting.go | 10 +- simulator/host_system.go | 45 +++++- simulator/host_system_test.go | 7 +- simulator/option_manager.go | 33 ++++- simulator/vpx/setting.go | 2 + 7 files changed, 271 insertions(+), 39 deletions(-) diff --git a/simulator/container_host_system.go b/simulator/container_host_system.go index 7fe90966e..121671c49 100644 --- a/simulator/container_host_system.go +++ b/simulator/container_host_system.go @@ -20,6 +20,7 @@ import ( "fmt" "strings" + "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/types" ) @@ -50,17 +51,13 @@ func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { host: host, } - simulated := false - for _, opt := range host.HostSystem.Config.Option { - val := opt.GetOptionValue() - if val.Key == "RUN.container" { - simulated = true - break + advOpts := ctx.Map.Get(host.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + fault := advOpts.QueryOptions(&types.QueryOptions{Name: "RUN.container"}).(*methods.QueryOptionsBody).Fault() + if fault != nil { + if _, ok := fault.VimFault().(*types.InvalidName); ok { + return nil, nil } - } - - if !simulated { - return nil, nil + return nil, fmt.Errorf("errror retrieving container backing from host config manager: %+v", fault.VimFault()) } // assemble env @@ -70,20 +67,10 @@ func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { var symlinkCmds [][]string var err error - // purge template filesystem info - this is either going to be replaced with user supplied info from HostConnectSpec.DynamicData or the - // default sim setup - // TODO: move this into DynamicData in the HostConnectSpec? 
Perhaps provide a method to populate the dyndata with a "default" sim config - if host.Config.FileSystemVolume == nil { - host.Config.FileSystemVolume = &types.HostFileSystemVolumeInfo{ - VolumeTypeList: []string{"VMFS", "OTHER"}, - } - } - if host.Config.FileSystemVolume.MountInfo == nil { - host.Config.FileSystemVolume.MountInfo = defaultSimVolumes - } - // TODO: handle the case that neither name nor GUID are specified for the host - containerName := constructContainerName("esx"+host.Summary.Config.Name, host.Summary.Host.ServerGUID) + hName := host.Summary.Config.Name + hUuid := host.Summary.Hardware.Uuid + containerName := constructContainerName(hName, hUuid) for i := range host.Config.FileSystemVolume.MountInfo { info := &host.Config.FileSystemVolume.MountInfo[i] @@ -156,7 +143,7 @@ func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { // if there's a DVS that doesn't have a bridge, create the bridge - sh.c, err = create(ctx, "esx"+host.Summary.Config.Name, host.Summary.Host.ServerGUID, dockerNet, dockerVol, nil, dockerEnv, "alpine", []string{"sleep", "infinity"}) + sh.c, err = create(ctx, hName, hUuid, dockerNet, dockerVol, nil, dockerEnv, "alpine", []string{"sleep", "infinity"}) if err != nil { return nil, err } @@ -182,6 +169,10 @@ func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { // remove destroys the container associated with the host and any volumes with labels specifying their lifecycle // is coupled with the container func (sh *simHost) remove(ctx *Context) error { + if sh == nil { + return nil + } + return sh.c.remove(ctx) } diff --git a/simulator/container_host_system_test.go b/simulator/container_host_system_test.go index 5995165f8..86c293f7e 100644 --- a/simulator/container_host_system_test.go +++ b/simulator/container_host_system_test.go @@ -19,11 +19,14 @@ package simulator import ( "testing" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/types" ) -func TestSimHost(t *testing.T) { +func TestHostOptionManager(t *testing.T) { m := ESX() defer m.Remove() @@ -34,15 +37,174 @@ func TestSimHost(t *testing.T) { } hs := NewHostSystem(esx.HostSystem) - if hs.Summary.Runtime != &hs.Runtime { - t.Fatal("expected hs.Summary.Runtime == &hs.Runtime; got !=") + + advOpts, ok := Map.Get(hs.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + require.True(t, ok, "Expected to inflate OptionManager from reference") + + option := &types.OptionValue{ + Key: "TEST.hello", + Value: "world", + } + + fault := advOpts.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Fault() + require.IsType(t, &types.InvalidName{}, fault.VimFault(), "Expected new host from template not to have test option set") + + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + require.Nil(t, fault, "Expected setting test option to succeed") + + queryRes := advOpts.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Res + require.Equal(t, 1, len(queryRes.Returnval), "Expected query of set option to succeed") + require.Equal(t, option.Value, queryRes.Returnval[0].GetOptionValue().Value, "Expected set value") + + option2 := &types.OptionValue{ + Key: "TEST.hello", + Value: "goodbye", + } + + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option2}}).Fault() + 
require.Nil(t, fault, "Expected update of test option to succeed") + + queryRes = advOpts.QueryOptions(&types.QueryOptions{Name: option2.Key}).(*methods.QueryOptionsBody).Res + require.Equal(t, 1, len(queryRes.Returnval), "Expected query of updated option to succeed") + require.Equal(t, option2.Value, queryRes.Returnval[0].GetOptionValue().Value, "Expected updated value") + + hs.configure(SpoofContext(), types.HostConnectSpec{}, true) + assert.Nil(t, hs.sh, "Expected not to have container backing if not requested") +} + +func TestSyncWithOptionsStruct(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + if err != nil { + t.Fatal(err) + } + + hs := NewHostSystem(esx.HostSystem) + + advOpts, ok := Map.Get(hs.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + require.True(t, ok, "Expected to inflate OptionManager from reference") + + option := &types.OptionValue{ + Key: "TEST.hello", + Value: "world", + } + + fault := advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + require.Nil(t, fault, "Expected setting test option to succeed") + + assert.Equal(t, option, hs.Config.Option[1], "Expected mirror to reflect changes") +} + +func TestPerHostOptionManager(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + if err != nil { + t.Fatal(err) + } + + hs := NewHostSystem(esx.HostSystem) + hs2 := NewHostSystem(esx.HostSystem) + + advOpts, ok := Map.Get(hs.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + require.True(t, ok, "Expected to inflate OptionManager from reference") + + advOpts2 := Map.Get(hs2.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + + option := &types.OptionValue{ + Key: "TEST.hello", + Value: "world", } - hs.HostSystem.Config.Option = append(hs.HostSystem.Config.Option, &types.OptionValue{Key: "RUN.container"}) - hs.HostSystem.Config.FileSystemVolume = nil + fault := advOpts.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Fault() + require.IsType(t, &types.InvalidName{}, fault.VimFault(), "Expected host from template not to have test option set") + + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + require.Nil(t, fault, "Expected setting test option to succeed") + + queryRes := advOpts.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Res + require.Equal(t, 1, len(queryRes.Returnval), "Expected query of set option to succeed") + require.Equal(t, option.Value, queryRes.Returnval[0].GetOptionValue().Value, "Expected set value") + + fault = advOpts2.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Fault() + require.IsType(t, &types.InvalidName{}, fault.VimFault(), "Expected second host to be unchanged") + + option2 := &types.OptionValue{ + Key: "TEST.hello", + Value: "goodbye", + } + + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option2}}).Fault() + require.Nil(t, fault, "Expected update of test option to succeed") + + queryRes = advOpts.QueryOptions(&types.QueryOptions{Name: option2.Key}).(*methods.QueryOptionsBody).Res + require.Equal(t, 1, len(queryRes.Returnval), "Expected query of updated option to succeed") + require.Equal(t, option2.Value, queryRes.Returnval[0].GetOptionValue().Value, "Expected updated value") + + assert.Equal(t, option2, hs.Config.Option[1], "Expected mirror to reflect changes") hs.configure(SpoofContext(), types.HostConnectSpec{}, true) + 
assert.Nil(t, hs.sh, "Expected not to have container backing if not requested") + + hs3 := NewHostSystem(esx.HostSystem) + + advOpts3 := Map.Get(hs3.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + fault = advOpts3.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Fault() + require.IsType(t, &types.InvalidName{}, fault.VimFault(), "Expected host created after update not to inherit change") + +} + +func TestHostContainerBacking(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + if err != nil { + t.Fatal(err) + } + + ctx := SpoofContext() + + hs := NewHostSystem(esx.HostSystem) + hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes) + + hs.configure(ctx, types.HostConnectSpec{}, true) + + //TODO: assert there's a container representing the host (consider a separate test for matching datastores and networks) + + hs.sh.remove(ctx) +} + +func TestMultipleSimHost(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + require.Nil(t, err, "expected successful creation of model") + + ctx := SpoofContext() + + hs := NewHostSystem(esx.HostSystem) + hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes) + + hs.configure(ctx, types.HostConnectSpec{}, true) + // TODO: assert container present + + hs2 := NewHostSystem(esx.HostSystem) + hs2.configureContainerBacking(ctx, "alpine", defaultSimVolumes) + + hs2.configure(ctx, types.HostConnectSpec{}, true) + // TODO: assert 2nd container present - hs.sh.remove(SpoofContext()) + hs.sh.remove(ctx) + // TODO: assert one container plus volumes left + hs2.sh.remove(ctx) } diff --git a/simulator/esx/setting.go b/simulator/esx/setting.go index da5dca20f..54ec6ead0 100644 --- a/simulator/esx/setting.go +++ b/simulator/esx/setting.go @@ -21,14 +21,20 @@ import "github.com/vmware/govmomi/vim25/types" // HardwareVersion is the default VirtualMachine.Config.Version var HardwareVersion = "vmx-13" -// Setting is captured from ESX's HostSystem.configManager.advancedOption +// AdvancedOptions is captured from ESX's HostSystem.configManager.advancedOption // Capture method: // // govc object.collect -s -dump $(govc object.collect -s HostSystem:ha-host configManager.advancedOption) setting -var Setting = []types.BaseOptionValue{ +var AdvancedOptions = []types.BaseOptionValue{ // This list is currently pruned to include a single option for testing &types.OptionValue{ Key: "Config.HostAgent.log.level", Value: "info", }, } + +// Setting is captured from ESX's HostSystem.ServiceContent.setting +// Capture method: +// +// govc object.collect -s -dump OptionManager:HostAgentSettings setting +var Setting = []types.BaseOptionValue{} diff --git a/simulator/host_system.go b/simulator/host_system.go index 3bf7122f1..b334ac1c2 100644 --- a/simulator/host_system.go +++ b/simulator/host_system.go @@ -17,6 +17,7 @@ limitations under the License. 
package simulator import ( + "fmt" "net" "os" "time" @@ -30,6 +31,8 @@ import ( var ( hostPortUnique = os.Getenv("VCSIM_HOST_PORT_UNIQUE") == "true" + + globalHostCount = 0 ) type HostSystem struct { @@ -44,6 +47,10 @@ func asHostSystemMO(obj mo.Reference) (*mo.HostSystem, bool) { } func NewHostSystem(host mo.HostSystem) *HostSystem { + // lets us construct non-conflicting hostname automatically if omitted + // does not use the unique port to avoid constraints on port, such as >1024 + globalHostCount++ + if hostPortUnique { // configure unique port for each host port := &esx.HostSystem.Summary.Config.Port *port++ @@ -74,6 +81,11 @@ func NewHostSystem(host mo.HostSystem) *HostSystem { deepCopy(hs.Config, cfg) hs.Config = cfg + // copy over the reference advanced options so each host can have it's own, allowing hosts to be configured for + // container backing individually + deepCopy(esx.AdvancedOptions, &cfg.Option) + + // add a supported option to the AdvancedOption manager simOption := types.OptionDef{ElementDescription: types.ElementDescription{Key: "RUN.container"}} hs.Config.OptionDef = append(hs.Config.OptionDef, simOption) @@ -83,7 +95,7 @@ func NewHostSystem(host mo.HostSystem) *HostSystem { }{ {&hs.ConfigManager.DatastoreSystem, &HostDatastoreSystem{Host: &hs.HostSystem}}, {&hs.ConfigManager.NetworkSystem, NewHostNetworkSystem(&hs.HostSystem)}, - {&hs.ConfigManager.AdvancedOption, NewOptionManager(nil, esx.Setting)}, + {&hs.ConfigManager.AdvancedOption, NewOptionManager(nil, nil, &hs.Config.Option)}, {&hs.ConfigManager.FirewallSystem, NewHostFirewallSystem(&hs.HostSystem)}, {&hs.ConfigManager.StorageSystem, NewHostStorageSystem(&hs.HostSystem)}, } @@ -102,7 +114,10 @@ func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connect if connected { h.Runtime.ConnectionState = types.HostSystemConnectionStateConnected } - if net.ParseIP(spec.HostName) != nil { + + if spec.HostName == "" { + spec.HostName = fmt.Sprintf("esx-%d", globalHostCount) + } else if net.ParseIP(spec.HostName) != nil { h.Config.Network.Vnic[0].Spec.Ip.IpAddress = spec.HostName } @@ -122,6 +137,32 @@ func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connect } } +// configureContainerBacking sets up _this_ host for simulation using a container backing. +// Args: +// +// image - the container image with which to simulate the host +// mounts - array of mount info that should be translated into /vmfs/volumes/... 
mounts backed by container volumes +func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mounts []types.HostFileSystemMountInfo) { + option := &types.OptionValue{ + Key: "RUN.container", + Value: image, + } + + advOpts := Map.Get(h.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + fault := advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + if fault != nil { + panic(fault) + } + + h.Config.FileSystemVolume = nil + if mounts != nil { + h.Config.FileSystemVolume = &types.HostFileSystemVolumeInfo{ + VolumeTypeList: []string{"VMFS", "OTHER"}, + MountInfo: mounts, + } + } +} + func (h *HostSystem) event() types.HostEvent { return types.HostEvent{ Event: types.Event{ diff --git a/simulator/host_system_test.go b/simulator/host_system_test.go index ffd43802f..9bfe26b6f 100644 --- a/simulator/host_system_test.go +++ b/simulator/host_system_test.go @@ -20,6 +20,7 @@ import ( "context" "testing" + "github.com/stretchr/testify/assert" "github.com/vmware/govmomi" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" @@ -127,9 +128,9 @@ func TestNewHostSystem(t *testing.T) { } hs := NewHostSystem(esx.HostSystem) - if hs.Summary.Runtime != &hs.Runtime { - t.Fatal("expected hs.Summary.Runtime == &hs.Runtime; got !=") - } + + assert.Equal(t, &hs.Runtime, hs.Summary.Runtime, "expected pointer to runtime in summary") + assert.False(t, esx.AdvancedOptions[0] == hs.Config.Option[0], "expected each host to have it's own advanced options") } func TestDestroyHostSystem(t *testing.T) { diff --git a/simulator/option_manager.go b/simulator/option_manager.go index efcdee215..1dd1688cd 100644 --- a/simulator/option_manager.go +++ b/simulator/option_manager.go @@ -28,19 +28,45 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +// OptionManager is used in at least two locations for ESX: +// 1. ServiceContent.setting - this is empty on ESX and //TODO on VC +// 2. ConfigManager.advancedOption - this is where the bulk of the ESX settings are found type OptionManager struct { mo.OptionManager + + // mirror is an array to keep in sync with OptionManager.Settings. Necessary because we use append. + // uni-directional - changes made to the mirrored array are not reflected back to Settings + mirror *[]types.BaseOptionValue +} + +func asOptionManager(ctx *Context, obj mo.Reference) (*OptionManager, bool) { + om, ok := ctx.Map.Get(obj.Reference()).(*OptionManager) + return om, ok } -func NewOptionManager(ref *types.ManagedObjectReference, setting []types.BaseOptionValue) object.Reference { +// NewOptionManager constructs the type. If mirror is non-nil it takes precedence over settings, and settings is ignored. +// Args: +// - ref - used to set OptionManager.Self if non-nil +// - setting - initial options, may be nil. +// - mirror - options array to keep updated with the OptionManager.Settings, may be nil. +func NewOptionManager(ref *types.ManagedObjectReference, setting []types.BaseOptionValue, mirror *[]types.BaseOptionValue) object.Reference { s := &OptionManager{} + + s.Setting = setting + if mirror != nil { + s.mirror = mirror + s.Setting = *mirror + } + if ref != nil { s.Self = *ref } - s.Setting = setting + return s } +// init constructs the OptionManager for ServiceContent.setting from the template directories. +// This does _not_ construct the OptionManager for ConfigManager.advancedOption. 
func (m *OptionManager) init(r *Registry) { if len(m.Setting) == 0 { if r.IsVPX() { @@ -103,6 +129,9 @@ func (m *OptionManager) UpdateOptions(req *types.UpdateOptions) soap.HasFault { } m.Setting = append(m.Setting, change) + if m.mirror != nil { + *m.mirror = m.Setting + } } body.Res = new(types.UpdateOptionsResponse) diff --git a/simulator/vpx/setting.go b/simulator/vpx/setting.go index 7bbf0c02d..7625824da 100644 --- a/simulator/vpx/setting.go +++ b/simulator/vpx/setting.go @@ -18,6 +18,8 @@ package vpx import "github.com/vmware/govmomi/vim25/types" +// TODO: figure out whether this is Setting or AdvancedOptions - see esx/setting.go for the difference + // Setting is captured from VC's ServiceContent.OptionManager.setting var Setting = []types.BaseOptionValue{ // This list is currently pruned to include sso options only with sso.enabled set to false From 6248cbfe3ed1031d34ca462eba1337acc4720847 Mon Sep 17 00:00:00 2001 From: George Hicken Date: Fri, 7 Jul 2023 19:45:44 +0000 Subject: [PATCH 4/8] vcsim: create underlay network for pNICs in container-backed hosts Connects sim-hosts to bridges as specified in their config. The bridges to use for a given pNIC are expressed in the advanced options using the following pattern (example for pNIC 0): RUN.underlay.vmnic0=vcsim-mgmt-underlay This uses an existing bridge or creates a new one as needed. If a host has a container backing, all pNICs defined in the host template are discarded and new pNICs are created, 1 per underlay name provided to simulator.HostSystem.configureContainerBacking. This was the only sane way I found to indicate which bridges a host should be connected to. The IP assigned to the container is reflected into the various host.Config structures associated with the vmknic, eg. VirtualNicManagerInfo The simulator.HostSystem.getNetConfigInterface method is prvoided to allow a caller to retrieve all the various network entities associated with a NetConfig binding, eg. "management", "vmotion". Remove use of errors.Join to maintain support for older Go versions. Known issues: * podman volume ls filters act as OR instead of AND which results in all volumes being deleted any time a single host is removed. Issue opened and fixed in podman main. 
--- simulator/container.go | 144 +++++++++-- simulator/container_host_system.go | 191 ++++++++++---- simulator/container_host_system_test.go | 30 ++- simulator/esx/host_config_filesystemvolume.go | 1 + simulator/host_system.go | 236 +++++++++++++++++- simulator/host_system_test.go | 1 + 6 files changed, 519 insertions(+), 84 deletions(-) diff --git a/simulator/container.go b/simulator/container.go index 33ebe5ca4..6f626d311 100644 --- a/simulator/container.go +++ b/simulator/container.go @@ -24,6 +24,7 @@ import ( "fmt" "io" "log" + "net" "os" "os/exec" "path" @@ -38,6 +39,7 @@ var ( const ( deleteWithContainer = "lifecycle=container" + createdByVcsim = "createdBy=vcsim" ) func init() { @@ -97,6 +99,11 @@ func extractNameAndUid(containerName string) (name string, uid string, err error return parts[0], parts[1], nil } +func prefixToMask(prefix int) string { + mask := net.CIDRMask(prefix, 32) + return fmt.Sprintf("%d.%d.%d.%d", mask[0], mask[1], mask[2], mask[3]) +} + type tarEntry struct { header *tar.Header content []byte @@ -129,12 +136,16 @@ func copyToGuest(id string, dest string, length int64, reader io.Reader) error { _, err = io.Copy(tw, reader) - errin := tw.Close() - errout := stdin.Close() + twErr := tw.Close() + stdinErr := stdin.Close() - errwait := cmd.Wait() + waitErr := cmd.Wait() + + if err != nil || twErr != nil || stdinErr != nil || waitErr != nil { + return fmt.Errorf("copy: {%s}, tw: {%s}, stdin: {%s}, wait: {%s}", err, twErr, stdinErr, waitErr) + } - return errors.Join(err, errout, errin, errwait) + return nil } func copyFromGuest(id string, src string, sink func(int64, io.Reader) error) error { @@ -155,9 +166,13 @@ func copyFromGuest(id string, src string, sink func(int64, io.Reader) error) err } err = sink(header.Size, tr) - errwait := cmd.Wait() + waitErr := cmd.Wait() + + if err != nil || waitErr != nil { + return fmt.Errorf("err: {%s}, wait: {%s}", err, waitErr) + } - return errors.Join(err, errwait) + return nil } // createVolume creates a volume populated with the provided files @@ -192,6 +207,10 @@ func createVolume(volumeName string, labels []string, files []tarEntry) (string, return "", err } uid = strings.TrimSpace(string(out)) + + if name == "" { + name = uid + } } run := []string{"run", "--rm", "-i"} @@ -231,23 +250,108 @@ func createVolume(volumeName string, labels []string, files []tarEntry) (string, } } - err1 := tw.Close() - err2 := stdin.Close() - err = errors.Join(err1, err2) + err = nil + twErr := tw.Close() + stdinErr := stdin.Close() + if twErr != nil || stdinErr != nil { + err = fmt.Errorf("tw: {%s}, stdin: {%s}", twErr, stdinErr) + } - if err3 := cmd.Wait(); err3 != nil { + if waitErr := cmd.Wait(); waitErr != nil { stderr := "" - if xerr, ok := err.(*exec.ExitError); ok { + if xerr, ok := waitErr.(*exec.ExitError); ok { stderr = string(xerr.Stderr) } - log.Printf("%s %s: %s %s", name, cmd.Args, err, stderr) + log.Printf("%s %s: %s %s", name, cmd.Args, waitErr, stderr) - return uid, errors.Join(err, err3) + err = fmt.Errorf("%s, wait: {%s}", err, waitErr) + return uid, err } return uid, err } +func getBridge(bridgeName string) (string, error) { + // {"CreatedAt":"2023-07-11 19:22:25.45027052 +0000 UTC","Driver":"bridge","ID":"fe52c7502c5d","IPv6":"false","Internal":"false","Labels":"goodbye=,hello=","Name":"testnet","Scope":"local"} + // podman has distinctly different fields at v4.4.1 so commented out fields that don't match. 
We only actually care about ID + type bridgeNet struct { + // CreatedAt string + Driver string + ID string + // IPv6 string + // Internal string + // Labels string + Name string + // Scope string + } + + // if the underlay bridge already exists, return that + // we don't check for a specific label or similar so that it's possible to use a bridge created by other frameworks for composite testing + var bridge bridgeNet + cmd := exec.Command("docker", "network", "ls", "--format", "json", "-f", fmt.Sprintf("name=%s$", bridgeName)) + out, err := cmd.Output() + if err != nil { + log.Printf("vcsim %s: %s", cmd.Args, err) + return "", err + } + + // unfortunately docker returns an empty string not an empty json doc and podman returns '[]' + // podman also returns an array of matches even when there's only one, so we normalize. + str := strings.TrimSpace(string(out)) + str = strings.TrimPrefix(str, "[") + str = strings.TrimSuffix(str, "]") + if len(str) == 0 { + return "", nil + } + + err = json.Unmarshal([]byte(str), &bridge) + if err != nil { + log.Printf("vcsim %s: %s", cmd.Args, err) + return "", err + } + + return bridge.ID, nil +} + +// createBridge creates a bridge network if one does not already exist +// returns: +// +// uid - string +// err - error or nil +func createBridge(bridgeName string, labels ...string) (string, error) { + + id, err := getBridge(bridgeName) + if err != nil { + return "", err + } + + if id != "" { + return id, nil + } + + run := []string{"network", "create", "--label", createdByVcsim} + for i := range labels { + run = append(run, "--label", labels[i]) + } + run = append(run, bridgeName) + + cmd := exec.Command("docker", run...) + out, err := cmd.Output() + if err != nil { + log.Printf("vcsim %s: %s: %s", cmd.Args, out, err) + return "", err + } + + // docker returns the ID regardless of whether you supply a name when creating the network, however + // podman returns the pretty name, so we have to normalize + id, err = getBridge(bridgeName) + if err != nil { + return "", err + } + + return id, nil +} + // create // - name - pretty name, eg. vm name // - id - uuid or similar - this is merged into container name rather than dictating containerID @@ -511,13 +615,12 @@ func (c *container) remove(ctx *Context) error { } } - combinedErr := errors.Join(err, lsverr, rmverr, lsnerr, rmnerr) - - if combinedErr == nil { - c.id = "" + if err != nil || lsverr != nil || rmverr != nil || lsnerr != nil || rmnerr != nil { + return fmt.Errorf("err: {%s}, lsverr: {%s}, rmverr: {%s}, lsnerr:{%s}, rmerr: {%s}", err, lsverr, rmverr, lsnerr, rmnerr) } - return combinedErr + c.id = "" + return nil } // watchContainer monitors the underlying container and updates @@ -552,9 +655,8 @@ func (c *container) watchContainer(ctx *Context, updateFn func(*Context, *contai } updateErr := updateFn(ctx, &details, c) - err = errors.Join(rmErr, updateErr) - if removing && err == nil { - // if we don't succeed we want to re-try + // if we don't succeed we want to re-try + if removing && rmErr == nil && updateErr == nil { ticker.Stop() return } diff --git a/simulator/container_host_system.go b/simulator/container_host_system.go index 121671c49..59f477b35 100644 --- a/simulator/container_host_system.go +++ b/simulator/container_host_system.go @@ -36,51 +36,35 @@ const GB = 1000 * MB const TB = 1000 * GB const PB = 1000 * TB +const ( + advOptPrefixPnicToUnderlayPrefix = "RUN.underlay." 
+ advOptContainerBackingImage = "RUN.container" +) + type simHost struct { host *HostSystem c *container } -// createSimulationHost inspects the provided HostSystem and creates a simHost binding for it if -// the vm.Config.ExtraConfig set contains a key "RUN.container". -// If the ExtraConfig set does not contain that key, this returns nil. -// Methods on the simHost type are written to check for nil object so the return from this call can be blindly -// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. -func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { - sh := &simHost{ - host: host, - } - - advOpts := ctx.Map.Get(host.ConfigManager.AdvancedOption.Reference()).(*OptionManager) - fault := advOpts.QueryOptions(&types.QueryOptions{Name: "RUN.container"}).(*methods.QueryOptionsBody).Fault() - if fault != nil { - if _, ok := fault.VimFault().(*types.InvalidName); ok { - return nil, nil - } - return nil, fmt.Errorf("errror retrieving container backing from host config manager: %+v", fault.VimFault()) - } - - // assemble env - var dockerEnv []string +// createSimHostMounts iterates over the provide filesystem mount info, creating docker volumes. It does _not_ delete volumes +// already created if creation of one fails. +// Returns: +// volume mounts: mount options suitable to pass directly to docker +// exec commands: a set of commands to run in the sim host after creation +// error: if construction of the above outputs fails +func createSimHostMounts(ctx *Context, containerName string, mounts []types.HostFileSystemMountInfo) ([]string, [][]string, error) { var dockerVol []string - var dockerNet []string var symlinkCmds [][]string - var err error - - hName := host.Summary.Config.Name - hUuid := host.Summary.Hardware.Uuid - containerName := constructContainerName(hName, hUuid) - - for i := range host.Config.FileSystemVolume.MountInfo { - info := &host.Config.FileSystemVolume.MountInfo[i] + for i := range mounts { + info := &mounts[i] name := info.Volume.GetHostFileSystemVolume().Name // NOTE: if we ever need persistence cross-invocation we can look at encoding the disk info as a label labels := []string{"name=" + name, "container=" + containerName, deleteWithContainer} dockerUuid, err := createVolume("", labels, nil) if err != nil { - return nil, err + return nil, nil, err } uuid := volumeIDtoHostVolumeUUID(dockerUuid) @@ -125,6 +109,10 @@ func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { } dockerVol = append(dockerVol, fmt.Sprintf("%s:/vmfs/volumes/%s:%s", dockerUuid, uuid, opt)) + + // create symlinks from /vmfs/volumes/ for the Volume Name - the direct mount (path) is only the uuid + // ? can we do this via a script in the ESX image instead of via exec? + // ? are the volume names exposed in any manner inside the host? They must be because these mounts exist but where does that come from? Chicken and egg problem? ConfigStore? 
symlinkCmds = append(symlinkCmds, []string{"ln", "-s", fmt.Sprintf("/vmfs/volumes/%s", uuid), fmt.Sprintf("/vmfs/volumes/%s", name)}) if strings.HasPrefix(name, "OSDATA") { symlinkCmds = append(symlinkCmds, []string{"mkdir", "-p", "/var/lib/vmware"}) @@ -132,37 +120,154 @@ func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { } } - // TODO: extract the underlay's from a topology config - // create a bridge for each broadcast domain a pnic is connected to - dockerNet = append(dockerNet, defaultUnderlayBridgeName) + return dockerVol, symlinkCmds, nil +} + +// createSimHostNetworks creates the networks for the host if not already created. Because we expect multiple hosts on the same network to act as a cluster +// it's likely that only the first host will create networks. +// This includes: +// * bridge network per-pNIC +// * bridge network per-DVS +// +// Returns: +// * array of networks to attach to +// * array of commands to run +// * error +func createSimHostNetworks(ctx *Context, containerName string, networkInfo *types.HostNetworkInfo, advOpts *OptionManager) ([]string, [][]string, error) { + var dockerNet []string + var cmds [][]string + + existingNets := make(map[string]string) + + // a pnic does not have an IP so this is purely a connectivity statement, not a network identity, however this is not how docker works + // so we're going to end up with a veth (our pnic) that does have an IP assigned. + // For now we're going to simply ignore that IP. //TODO: figure out whether we _need_ to do something with it at this point + for i := range networkInfo.Pnic { + pnicName := networkInfo.Pnic[i].Device + + bridge := getPnicUnderlay(advOpts, pnicName) + + if pnic, attached := existingNets[bridge]; attached { + return nil, nil, fmt.Errorf("cannot attach multiple pNICs to the same underlay: %s and %s both attempting to connect to %s for %s", pnic, pnicName, bridge, containerName) + } + + _, err := createBridge(bridge) + if err != nil { + return nil, nil, err + } + + dockerNet = append(dockerNet, bridge) + existingNets[bridge] = pnicName + } + + return dockerNet, cmds, nil +} + +func getPnicUnderlay(advOpts *OptionManager, pnicName string) string { + queryRes := advOpts.QueryOptions(&types.QueryOptions{Name: advOptPrefixPnicToUnderlayPrefix + pnicName}).(*methods.QueryOptionsBody).Res + return queryRes.Returnval[0].GetOptionValue().Value.(string) +} + +// createSimulationHostcreates a simHost binding if the host.ConfigManager.AdvancedOption set contains a key "RUN.container". +// If the set does not contain that key, this returns nil. +// Methods on the simHost type are written to check for nil object so the return from this call can be blindly +// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. +// +// The created simhost is based off of the details of the supplied host system. +// VMFS locations are created based on FileSystemMountInfo +// Bridge networks are created to simulate underlay networks - one per pNIC. You cannot connect two pNICs to the same underlay. +// +// On Network connectivity - initially this is using docker network constructs. 
This means we cannot easily use nested "ip netns" so we cannot +// have a perfect representation of the ESX structure: pnic(veth)->vswtich(bridge)->{vmk,vnic}(veth) +// Instead we have the following: +// * bridge network per underlay - everything connects directly to the underlay +// * VMs/CRXs connect to the underlay dictated by the Uplink pNIC attached to their vSwitch +// * hostd vmknic gets the "host" container IP - we don't currently support multiple vmknics with different IPs +// * no support for mocking VLANs +func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) { + sh := &simHost{ + host: host, + } + + advOpts := ctx.Map.Get(host.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + fault := advOpts.QueryOptions(&types.QueryOptions{Name: "RUN.container"}).(*methods.QueryOptionsBody).Fault() + if fault != nil { + if _, ok := fault.VimFault().(*types.InvalidName); ok { + return nil, nil + } + return nil, fmt.Errorf("errror retrieving container backing from host config manager: %+v", fault.VimFault()) + } + + // assemble env + var dockerEnv []string - // TODO: add in vSwitches if we know them at this point + var execCmds [][]string - // - a pnic does not have an IP so this is purely a connectivity statement, not a network identity - // ? how is this underlay topology expressed? Initially we can assume a flat topology with all hosts on the same broadcast domain + var err error - // if there's a DVS that doesn't have a bridge, create the bridge + hName := host.Summary.Config.Name + hUuid := host.Summary.Hardware.Uuid + containerName := constructContainerName(hName, hUuid) + // create volumes and mounts + dockerVol, volCmds, err := createSimHostMounts(ctx, containerName, host.Config.FileSystemVolume.MountInfo) + if err != nil { + return nil, err + } + execCmds = append(execCmds, volCmds...) + + // create networks + dockerNet, netCmds, err := createSimHostNetworks(ctx, containerName, host.Config.Network, advOpts) + if err != nil { + return nil, err + } + execCmds = append(execCmds, netCmds...) + + // create the container sh.c, err = create(ctx, hName, hUuid, dockerNet, dockerVol, nil, dockerEnv, "alpine", []string{"sleep", "infinity"}) if err != nil { return nil, err } + // start the container err = sh.c.start(ctx) if err != nil { return nil, err } - // create symlinks from /vmfs/volumes/ for the Volume Name - the direct mount (path) is only the uuid - // ? can we do this via a script in the ESX image? are the volume names exposed in any manner instead the host? They must be because these mounts exist - // but where does that come from? Chicken and egg problem? ConfigStore? - for _, symlink := range symlinkCmds { - _, err := sh.c.exec(ctx, symlink) + // run post-creation steps + for _, cmd := range execCmds { + _, err := sh.c.exec(ctx, cmd) if err != nil { return nil, err } } + _, detail, err := sh.c.inspect() + + for i := range host.Config.Network.Pnic { + pnic := &host.Config.Network.Pnic[i] + bridge := getPnicUnderlay(advOpts, pnic.Device) + settings := detail.NetworkSettings.Networks[bridge] + + // it doesn't really make sense at an ESX level to set this information as IP bindings are associated with + // vnics (VMs) or vmknics (daemons such as hostd). + // However it's a useful location to stash this info in a manner that can be retrieved at a later date. 
+ pnic.Spec.Ip.IpAddress = settings.IPAddress + pnic.Spec.Ip.SubnetMask = prefixToMask(settings.IPPrefixLen) + + pnic.Mac = settings.MacAddress + } + + // update the active "management" nicType with the container IP for vmnic0 + netconfig, err := host.getNetConfigInterface(ctx, "management") + if err != nil { + return nil, err + } + netconfig.vmk.Spec.Ip.IpAddress = netconfig.uplink.Spec.Ip.IpAddress + netconfig.vmk.Spec.Ip.SubnetMask = netconfig.uplink.Spec.Ip.SubnetMask + netconfig.vmk.Spec.Mac = netconfig.uplink.Mac + return sh, nil } diff --git a/simulator/container_host_system_test.go b/simulator/container_host_system_test.go index 86c293f7e..8709b4bc7 100644 --- a/simulator/container_host_system_test.go +++ b/simulator/container_host_system_test.go @@ -21,6 +21,7 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + "github.com/vmware/govmomi/simulator/esx" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/types" @@ -172,11 +173,16 @@ func TestHostContainerBacking(t *testing.T) { ctx := SpoofContext() hs := NewHostSystem(esx.HostSystem) - hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes) + hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes, "vcsim-mgmt-underlay") + + details, err := hs.getNetConfigInterface(ctx, "management") + assert.NoError(t, err, "Expected no error from management netconfig check") + assert.Equal(t, "0.0.0.0", details.vmk.Spec.Ip.IpAddress, "Expected IP to be empty prior to container creation") hs.configure(ctx, types.HostConnectSpec{}, true) - //TODO: assert there's a container representing the host (consider a separate test for matching datastores and networks) + assert.NoError(t, err, "Expected no error from management netconfig check") + assert.NotEqual(t, "0.0.0.0", details.vmk.Spec.Ip.IpAddress, "Expected management IP to set after container creation") hs.sh.remove(ctx) } @@ -194,17 +200,27 @@ func TestMultipleSimHost(t *testing.T) { hs := NewHostSystem(esx.HostSystem) hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes) - hs.configure(ctx, types.HostConnectSpec{}, true) - // TODO: assert container present - hs2 := NewHostSystem(esx.HostSystem) hs2.configureContainerBacking(ctx, "alpine", defaultSimVolumes) + details, err := hs.getNetConfigInterface(ctx, "management") + assert.NoError(t, err, "Expected no error from management netconfig check") + assert.Equal(t, "0.0.0.0", details.vmk.Spec.Ip.IpAddress, "Expected IP to be empty prior to container creation") + + hs.configure(ctx, types.HostConnectSpec{}, true) + + details2, err := hs2.getNetConfigInterface(ctx, "management") + assert.NoError(t, err, "Expected no error from management netconfig check") + assert.Equal(t, "0.0.0.0", details2.vmk.Spec.Ip.IpAddress, "Expected IP to be empty prior to container creation") + hs2.configure(ctx, types.HostConnectSpec{}, true) - // TODO: assert 2nd container present + + assert.NotEqual(t, details.vmk.Spec.Ip.IpAddress, details2.vmk.Spec.Ip.IpAddress, "Expected hosts to get different IPs") hs.sh.remove(ctx) - // TODO: assert one container plus volumes left + // TODO: assert one container plus volumes left - need to wait for + // https://github.com/containers/podman/issues/19219 to be fixed for podman to work - otherwise all volumes get removed + // with the first host removed hs2.sh.remove(ctx) } diff --git a/simulator/esx/host_config_filesystemvolume.go b/simulator/esx/host_config_filesystemvolume.go index ac841e05a..578fc5f64 100644 --- 
a/simulator/esx/host_config_filesystemvolume.go +++ b/simulator/esx/host_config_filesystemvolume.go @@ -33,6 +33,7 @@ const PB = 1000 * TB // HostConfigInfo is the default template for the HostSystem config property. // Capture method: // govc object.collect -s -dump HostSystem:ha-host config.fileSystemVolume +// - slightly modified for uuids and DiskName var HostFileSystemVolumeInfo = types.HostFileSystemVolumeInfo{ VolumeTypeList: []string{"VMFS", "NFS", "NFS41", "vsan", "VVOL", "VFFS", "OTHER", "PMEM"}, MountInfo: []types.HostFileSystemMountInfo{ diff --git a/simulator/host_system.go b/simulator/host_system.go index b334ac1c2..eb07b6eb4 100644 --- a/simulator/host_system.go +++ b/simulator/host_system.go @@ -20,6 +20,7 @@ import ( "fmt" "net" "os" + "sync" "time" "github.com/vmware/govmomi/simulator/esx" @@ -32,6 +33,8 @@ import ( var ( hostPortUnique = os.Getenv("VCSIM_HOST_PORT_UNIQUE") == "true" + globalLock sync.Mutex + // globalHostCount is used to construct unique hostnames. Should be consumed under globalLock. globalHostCount = 0 ) @@ -47,10 +50,6 @@ func asHostSystemMO(obj mo.Reference) (*mo.HostSystem, bool) { } func NewHostSystem(host mo.HostSystem) *HostSystem { - // lets us construct non-conflicting hostname automatically if omitted - // does not use the unique port to avoid constraints on port, such as >1024 - globalHostCount++ - if hostPortUnique { // configure unique port for each host port := &esx.HostSystem.Summary.Config.Port *port++ @@ -86,7 +85,8 @@ func NewHostSystem(host mo.HostSystem) *HostSystem { deepCopy(esx.AdvancedOptions, &cfg.Option) // add a supported option to the AdvancedOption manager - simOption := types.OptionDef{ElementDescription: types.ElementDescription{Key: "RUN.container"}} + simOption := types.OptionDef{ElementDescription: types.ElementDescription{Key: advOptContainerBackingImage}} + // TODO: how do we enter patterns here? Or should we stick to a list in the value? hs.Config.OptionDef = append(hs.Config.OptionDef, simOption) config := []struct { @@ -115,8 +115,16 @@ func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connect h.Runtime.ConnectionState = types.HostSystemConnectionStateConnected } + // lets us construct non-conflicting hostname automatically if omitted + // does not use the unique port instead to avoid constraints on port, such as >1024 + + globalLock.Lock() + instanceID := globalHostCount + globalHostCount++ + globalLock.Unlock() + if spec.HostName == "" { - spec.HostName = fmt.Sprintf("esx-%d", globalHostCount) + spec.HostName = fmt.Sprintf("esx-%d", instanceID) } else if net.ParseIP(spec.HostName) != nil { h.Config.Network.Vnic[0].Spec.Ip.IpAddress = spec.HostName } @@ -127,9 +135,6 @@ func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connect h.Summary.Hardware.Uuid = id h.Hardware.SystemInfo.Uuid = id - // bind to a simulation host with container backing if specified by options - // TODO: decide whether to require this at host creation or allow binding during a reconfigure - // TODO: handle the error return var err error h.sh, err = createSimulationHost(ctx, h) if err != nil { @@ -140,11 +145,22 @@ func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connect // configureContainerBacking sets up _this_ host for simulation using a container backing. // Args: // -// image - the container image with which to simulate the host -// mounts - array of mount info that should be translated into /vmfs/volumes/... 
mounts backed by container volumes -func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mounts []types.HostFileSystemMountInfo) { +// image - the container image with which to simulate the host +// mounts - array of mount info that should be translated into /vmfs/volumes/... mounts backed by container volumes +// networks - names of bridges to use for underlays. Will create a pNIC for each. The first will be treated as the management network. +// +// Restrictions adopted from createSimulationHost: +// * no mock of VLAN connectivity +// * only a single vmknic, used for "the management IP" +// * pNIC connectivity does not directly impact VMs/vmks using it as uplink +// +// The pnics will be named using standard pattern, ie. vmnic0, vmnic1, ... +// This will sanity check the NetConfig for "management" nicType to ensure that it maps through PortGroup->vSwitch->pNIC to vmnic0. +// +// TODO: figure out which other HostVirtualNicSpec's need to be updated with IP, eg. Config.vMotion, Config.Network.vNIC +func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mounts []types.HostFileSystemMountInfo, networks ...string) error { option := &types.OptionValue{ - Key: "RUN.container", + Key: advOptContainerBackingImage, Value: image, } @@ -161,6 +177,200 @@ func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mount MountInfo: mounts, } } + + // force at least a management network + if len(networks) == 0 { + networks = []string{defaultUnderlayBridgeName} + } + + // purge pNICs from the template - it makes no sense to keep them for a sim host + h.Config.Network.Pnic = make([]types.PhysicalNic, len(networks)) + + // purge any IPs and MACs associated with existing NetConfigs for the host + for cfgIdx := range h.Config.VirtualNicManagerInfo.NetConfig { + config := &h.Config.VirtualNicManagerInfo.NetConfig[cfgIdx] + for candidateIdx := range config.CandidateVnic { + candidate := &config.CandidateVnic[candidateIdx] + candidate.Spec.Ip.IpAddress = "0.0.0.0" + candidate.Spec.Ip.SubnetMask = "0.0.0.0" + candidate.Spec.Mac = "00:00:00:00:00:00" + } + } + + // The presence of a pNIC is used to indicate connectivity to a specific underlay. We construct an empty pNIC entry and specify the underly via + // host.ConfigManager.AdvancedOptions. The pNIC will be populated with the MAC (accurate) and IP (divergence - we need to stash it somewhere) for the veth. + // We create a NetConfig "management" entry for the first pNIC - this will be populated with the IP of the "host" container. + + // create a pNIC for each underlay + for i, net := range networks { + name := fmt.Sprintf("vmnic%d", i) + + // we don't have a natural field for annotating which pNIC is connected to which network, so stash it in an adv option. 
+ option := &types.OptionValue{ + Key: advOptPrefixPnicToUnderlayPrefix + name, + Value: net, + } + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + if fault != nil { + panic(fault) + } + + h.Config.Network.Pnic[i] = types.PhysicalNic{ + Key: "key-vim.host.PhysicalNic-" + name, + Device: name, + Pci: fmt.Sprintf("0000:%2d:00.0", i+1), + Driver: "vcsim-bridge", + DriverVersion: "1.2.10.0", + FirmwareVersion: "1.57, 0x80000185", + LinkSpeed: &types.PhysicalNicLinkInfo{ + SpeedMb: 10000, + Duplex: true, + }, + ValidLinkSpecification: []types.PhysicalNicLinkInfo{ + { + SpeedMb: 10000, + Duplex: true, + }, + }, + Spec: types.PhysicalNicSpec{ + Ip: &types.HostIpConfig{}, + LinkSpeed: (*types.PhysicalNicLinkInfo)(nil), + EnableEnhancedNetworkingStack: types.NewBool(false), + EnsInterruptEnabled: types.NewBool(false), + }, + WakeOnLanSupported: false, + Mac: "00:00:00:00:00:00", + FcoeConfiguration: &types.FcoeConfig{ + PriorityClass: 3, + SourceMac: "00:00:00:00:00:00", + VlanRange: []types.FcoeConfigVlanRange{ + {}, + }, + Capabilities: types.FcoeConfigFcoeCapabilities{}, + FcoeActive: false, + }, + VmDirectPathGen2Supported: types.NewBool(false), + VmDirectPathGen2SupportedMode: "", + ResourcePoolSchedulerAllowed: types.NewBool(false), + ResourcePoolSchedulerDisallowedReason: nil, + AutoNegotiateSupported: types.NewBool(true), + EnhancedNetworkingStackSupported: types.NewBool(false), + EnsInterruptSupported: types.NewBool(false), + RdmaDevice: "", + DpuId: "", + } + } + + // sanity check that everything's hung together sufficiently well + details, err := h.getNetConfigInterface(ctx, "management") + if err != nil { + return err + } + + if details.uplink == nil || details.uplink.Device != "vmnic0" { + return fmt.Errorf("Config provided for host %s does not result in a consistent 'management' NetConfig that's bound to 'vmnic0'", h.Name) + } + + return nil +} + +// netConfigDetails is used to packaged up all the related network entities associated with a NetConfig binding +type netConfigDetails struct { + nicType string + netconfig *types.VirtualNicManagerNetConfig + vmk *types.HostVirtualNic + netstack *types.HostNetStackInstance + portgroup *types.HostPortGroup + vswitch *types.HostVirtualSwitch + uplink *types.PhysicalNic +} + +// getNetConfigInterface returns the set of constructs active for a given nicType (eg. "management", "vmotion") +// This method is provided because the Config structure held by HostSystem is heavily interconnected but serialized and not cross-linked with pointers. +// As such there's a _lot_ of cross-referencing that needs to be done to navigate. 
+// The pNIC returned is the uplink associated with the vSwitch for the netconfig +func (h *HostSystem) getNetConfigInterface(ctx *Context, nicType string) (*netConfigDetails, error) { + details := &netConfigDetails{ + nicType: nicType, + } + + for i := range h.Config.VirtualNicManagerInfo.NetConfig { + if h.Config.VirtualNicManagerInfo.NetConfig[i].NicType == nicType { + details.netconfig = &h.Config.VirtualNicManagerInfo.NetConfig[i] + break + } + } + if details.netconfig == nil { + return nil, fmt.Errorf("no matching NetConfig for NicType=%s", nicType) + } + + if details.netconfig.SelectedVnic == nil { + return details, nil + } + + vnicKey := details.netconfig.SelectedVnic[0] + for i := range details.netconfig.CandidateVnic { + if details.netconfig.CandidateVnic[i].Key == vnicKey { + details.vmk = &details.netconfig.CandidateVnic[i] + break + } + } + if details.vmk == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant vNIC key %s for %s nicType", h.Name, vnicKey, nicType)) + } + + portgroupName := details.vmk.Portgroup + netstackKey := details.vmk.Spec.NetStackInstanceKey + + for i := range h.Config.Network.NetStackInstance { + if h.Config.Network.NetStackInstance[i].Key == netstackKey { + details.netstack = &h.Config.Network.NetStackInstance[i] + break + } + } + if details.netstack == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant NetStack key %s for %s nicType", h.Name, netstackKey, nicType)) + } + + for i := range h.Config.Network.Portgroup { + // TODO: confirm correctness of this - seems weird it references the Spec.Name instead of the key like everything else. + if h.Config.Network.Portgroup[i].Spec.Name == portgroupName { + details.portgroup = &h.Config.Network.Portgroup[i] + break + } + } + if details.portgroup == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant PortGroup name %s for %s nicType", h.Name, portgroupName, nicType)) + } + + vswitchKey := details.portgroup.Vswitch + for i := range h.Config.Network.Vswitch { + if h.Config.Network.Vswitch[i].Key == vswitchKey { + details.vswitch = &h.Config.Network.Vswitch[i] + break + } + } + if details.vswitch == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant vSwitch key %s for %s nicType", h.Name, vswitchKey, nicType)) + } + + if len(details.vswitch.Pnic) != 1 { + // to change this, look at the Active NIC in the NicTeamingPolicy, but for now not worth it + panic(fmt.Sprintf("vSwitch %s for host %s has multiple pNICs associated which is not supported.", vswitchKey, h.Name)) + } + + pnicKey := details.vswitch.Pnic[0] + for i := range h.Config.Network.Pnic { + if h.Config.Network.Pnic[i].Key == pnicKey { + details.uplink = &h.Config.Network.Pnic[i] + break + } + } + if details.uplink == nil { + panic(fmt.Sprintf("NetConfig for host %s references non-existant pNIC key %s for %s nicType", h.Name, pnicKey, nicType)) + } + + return details, nil } func (h *HostSystem) event() types.HostEvent { diff --git a/simulator/host_system_test.go b/simulator/host_system_test.go index 9bfe26b6f..99e61b2c4 100644 --- a/simulator/host_system_test.go +++ b/simulator/host_system_test.go @@ -21,6 +21,7 @@ import ( "testing" "github.com/stretchr/testify/assert" + "github.com/vmware/govmomi" "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" From 6f34f1319ce2923a7b199da6648cc8cccb0c49d9 Mon Sep 17 00:00:00 2001 From: George Hicken Date: Fri, 14 Jul 2023 04:50:41 -0700 Subject: [PATCH 5/8] vcsim: container backing respects changes via reconfigure 
If "RUN.container" is added or removed on an existing VM, that change is applied immediately if the VM is currently powered on. Modifications to the value of the key do not have an effect unless the continue needs to be recreated for some reason. Switches to Go templates for formating docker command output Includes additional error logging detail --- simulator/container.go | 11 +- simulator/container_virtual_machine.go | 6 +- simulator/container_virtual_machine_test.go | 249 ++++++++++++++++++++ simulator/feature_test.go | 2 +- simulator/virtual_machine.go | 63 ++++- 5 files changed, 315 insertions(+), 16 deletions(-) create mode 100644 simulator/container_virtual_machine_test.go diff --git a/simulator/container.go b/simulator/container.go index 6f626d311..16294b8e8 100644 --- a/simulator/container.go +++ b/simulator/container.go @@ -288,10 +288,10 @@ func getBridge(bridgeName string) (string, error) { // if the underlay bridge already exists, return that // we don't check for a specific label or similar so that it's possible to use a bridge created by other frameworks for composite testing var bridge bridgeNet - cmd := exec.Command("docker", "network", "ls", "--format", "json", "-f", fmt.Sprintf("name=%s$", bridgeName)) + cmd := exec.Command("docker", "network", "ls", "--format={{json .}}", "-f", fmt.Sprintf("name=%s$", bridgeName)) out, err := cmd.Output() if err != nil { - log.Printf("vcsim %s: %s", cmd.Args, err) + log.Printf("vcsim %s: %s, %s", cmd.Args, err, out) return "", err } @@ -306,7 +306,7 @@ func getBridge(bridgeName string) (string, error) { err = json.Unmarshal([]byte(str), &bridge) if err != nil { - log.Printf("vcsim %s: %s", cmd.Args, err) + log.Printf("vcsim %s: %s, %s", cmd.Args, err, str) return "", err } @@ -586,15 +586,16 @@ func (c *container) remove(ctx *Context) error { if lsverr != nil { log.Printf("%s %s: %s", c.name, cmd.Args, lsverr) } + log.Printf("%s volumes: %s", c.name, volumesToReap) var rmverr error if len(volumesToReap) > 0 { run := []string{"volume", "rm", "-f"} run = append(run, strings.Split(string(volumesToReap), "\n")...) cmd = exec.Command("docker", run...) 
- rmverr = cmd.Run() + out, rmverr := cmd.Output() if rmverr != nil { - log.Printf("%s %s: %s", c.name, cmd.Args, rmverr) + log.Printf("%s %s: %s, %s", c.name, cmd.Args, rmverr, out) } } diff --git a/simulator/container_virtual_machine.go b/simulator/container_virtual_machine.go index cc67d063f..c12a1e1eb 100644 --- a/simulator/container_virtual_machine.go +++ b/simulator/container_virtual_machine.go @@ -34,6 +34,8 @@ import ( "github.com/vmware/govmomi/vim25/types" ) +const ContainerBackingOptionKey = "RUN.container" + var ( toolsRunning = []types.PropertyChange{ {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsOk}, @@ -63,7 +65,7 @@ func createSimulationVM(vm *VirtualMachine) *simVM { for _, opt := range vm.Config.ExtraConfig { val := opt.GetOptionValue() - if val.Key == "RUN.container" { + if val.Key == ContainerBackingOptionKey { return svm } } @@ -203,7 +205,7 @@ func (svm *simVM) start(ctx *Context) error { for _, opt := range svm.vm.Config.ExtraConfig { val := opt.GetOptionValue() - if val.Key == "RUN.container" { + if val.Key == ContainerBackingOptionKey { run := val.Value.(string) err := json.Unmarshal([]byte(run), &args) if err != nil { diff --git a/simulator/container_virtual_machine_test.go b/simulator/container_virtual_machine_test.go new file mode 100644 index 000000000..e36782a66 --- /dev/null +++ b/simulator/container_virtual_machine_test.go @@ -0,0 +1,249 @@ +/* +Copyright (c) 2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "bytes" + "context" + "fmt" + "log" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" + + "github.com/vmware/govmomi/find" + "github.com/vmware/govmomi/object" + "github.com/vmware/govmomi/vim25" + "github.com/vmware/govmomi/vim25/types" +) + +// takes a content string to serve from the container and returns ExtraConfig options +// to construct container +// content - the contents of index.html +// port - the port to forward to the container port 80 +func constructNginxBacking(t *testing.T, content string, port int) []types.BaseOptionValue { + dir := t.TempDir() + for dirpart := dir; dirpart != "/"; dirpart = filepath.Dir(dirpart) { + os.Chmod(dirpart, 0755) + } + + fpath := filepath.Join(dir, "index.html") + os.WriteFile(fpath, []byte(content), 0644) + // just in case umask gets in the way + os.Chmod(fpath, 0644) + + args := fmt.Sprintf("-v '%s:/usr/share/nginx/html:ro' nginx", dir) + + return []types.BaseOptionValue{ + &types.OptionValue{Key: ContainerBackingOptionKey, Value: args}, // run nginx + &types.OptionValue{Key: "RUN.port.80", Value: "8888"}, // test port remap + } +} + +// validates the VM is serving the expected content on the expected ports +// pairs with constructNginxBacking +func validateNginxContainer(t *testing.T, vm *object.VirtualMachine, expected string, port int) error { + ip, _ := vm.WaitForIP(context.Background(), true) // Returns the docker container's IP + + // Count the number of bytes in feature_test.go via nginx going direct to the container + cmd := exec.Command("docker", "run", "--rm", "curlimages/curl", "curl", "-f", fmt.Sprintf("http://%s:80", ip)) + var buf bytes.Buffer + cmd.Stdout = &buf + err := cmd.Run() + res := buf.String() + + if err != nil || strings.TrimSpace(res) != expected { + // we use Fail not Fatal because we want to clean up + t.Fail() + t.Log(err, buf.String()) + fmt.Printf("%d diff", buf.Len()-len(expected)) + } + + // Count the number of bytes in feature_test.go via nginx going via port remap on host + cmd = exec.Command("curl", "-f", fmt.Sprintf("http://localhost:%d", port)) + buf.Reset() + cmd.Stdout = &buf + err = cmd.Run() + res = buf.String() + if err != nil || strings.TrimSpace(res) != expected { + t.Fail() + t.Log(err, buf.String()) + fmt.Printf("%d diff", buf.Len()-len(expected)) + } + + return nil +} + +// 1. Construct ExtraConfig args for container backing +// 2. Create VM using that ExtraConfig +// 3. 
Confirm docker container present that matches expectations +func TestCreateVMWithContainerBacking(t *testing.T) { + Test(func(ctx context.Context, c *vim25.Client) { + if _, err := exec.LookPath("docker"); err != nil { + fmt.Println("0 diff") + t.Skip("docker client binary not on PATH") + return + } + + finder := find.NewFinder(c) + pool, _ := finder.ResourcePool(ctx, "DC0_H0/Resources") + dc, err := finder.Datacenter(ctx, "DC0") + if err != nil { + log.Fatal(err) + } + + content := "foo" + port := 8888 + + spec := types.VirtualMachineConfigSpec{ + Name: "nginx-container-backed-from-creation", + Files: &types.VirtualMachineFileInfo{ + VmPathName: "[LocalDS_0] nginx", + }, + ExtraConfig: constructNginxBacking(t, content, port), + } + + f, _ := dc.Folders(ctx) + // Create a new VM + task, err := f.VmFolder.CreateVM(ctx, spec, pool, nil) + if err != nil { + log.Fatal(err) + } + + info, err := task.WaitForResult(ctx, nil) + if err != nil { + log.Fatal(err) + } + + vm := object.NewVirtualMachine(c, info.Result.(types.ManagedObjectReference)) + // PowerOn VM starts the nginx container + task, _ = vm.PowerOn(ctx) + err = task.Wait(ctx) + if err != nil { + log.Fatal(err) + } + + err = validateNginxContainer(t, vm, content, port) + if err != nil { + log.Fatal(err) + } + + spec2 := types.VirtualMachineConfigSpec{ + ExtraConfig: []types.BaseOptionValue{ + &types.OptionValue{Key: ContainerBackingOptionKey, Value: ""}, + }, + } + + task, err = vm.Reconfigure(ctx, spec2) + if err != nil { + log.Fatal(err) + } + + info, err = task.WaitForResult(ctx, nil) + if err != nil { + log.Fatal(info, err) + } + + // PowerOff stops the container + task, _ = vm.PowerOff(ctx) + _ = task.Wait(ctx) + // Destroy deletes the container + task, _ = vm.Destroy(ctx) + _ = task.Wait(ctx) + }) + // Output: 0 diff +} + +// 1. Create VM without ExtraConfig args for container backing +// 2. Construct ExtraConfig args for container backing +// 3. Update VM with ExtraConfig +// 4. 
Confirm docker container present that matches expectations +func TestUpdateVMAddContainerBacking(t *testing.T) { + Test(func(ctx context.Context, c *vim25.Client) { + if _, err := exec.LookPath("docker"); err != nil { + fmt.Println("0 diff") + t.Skip("docker client binary not on PATH") + return + } + + finder := find.NewFinder(c) + pool, _ := finder.ResourcePool(ctx, "DC0_H0/Resources") + dc, err := finder.Datacenter(ctx, "DC0") + if err != nil { + log.Fatal(err) + } + + content := "foo" + port := 8888 + + spec := types.VirtualMachineConfigSpec{ + Name: "nginx-container-after-reconfig", + Files: &types.VirtualMachineFileInfo{ + VmPathName: "[LocalDS_0] nginx", + }, + } + + f, _ := dc.Folders(ctx) + // Create a new VM + task, err := f.VmFolder.CreateVM(ctx, spec, pool, nil) + if err != nil { + log.Fatal(err) + } + + info, err := task.WaitForResult(ctx, nil) + if err != nil { + log.Fatal(err) + } + + vm := object.NewVirtualMachine(c, info.Result.(types.ManagedObjectReference)) + // PowerOn VM starts the nginx container + task, _ = vm.PowerOn(ctx) + err = task.Wait(ctx) + if err != nil { + log.Fatal(err) + } + + spec2 := types.VirtualMachineConfigSpec{ + ExtraConfig: constructNginxBacking(t, content, port), + } + + task, err = vm.Reconfigure(ctx, spec2) + if err != nil { + log.Fatal(err) + } + + info, err = task.WaitForResult(ctx, nil) + if err != nil { + log.Fatal(info, err) + } + + err = validateNginxContainer(t, vm, content, port) + if err != nil { + log.Fatal(err) + } + + // PowerOff stops the container + task, _ = vm.PowerOff(ctx) + _ = task.Wait(ctx) + // Destroy deletes the container + task, _ = vm.Destroy(ctx) + _ = task.Wait(ctx) + }) + // Output: 0 diff +} diff --git a/simulator/feature_test.go b/simulator/feature_test.go index 8ff91b116..ea817e052 100644 --- a/simulator/feature_test.go +++ b/simulator/feature_test.go @@ -159,7 +159,7 @@ func Example_runContainer() { } // Count the number of bytes in feature_test.go via nginx going via port remap on host - cmd = exec.Command("docker", "run", "--rm", "--network=host", "curlimages/curl", "curl", "-f", fmt.Sprintf("http://%s", ip)) + cmd = exec.Command("curl", "-f", "http://localhost:8888") buf.Reset() cmd.Stdout = &buf err = cmd.Run() diff --git a/simulator/virtual_machine.go b/simulator/virtual_machine.go index d65937c85..ff43a26c5 100644 --- a/simulator/virtual_machine.go +++ b/simulator/virtual_machine.go @@ -394,7 +394,8 @@ func extraConfigKey(key string) string { return key } -func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec) { +func (vm *VirtualMachine) applyExtraConfig(ctx *Context, spec *types.VirtualMachineConfigSpec) types.BaseMethodFault { + var removedContainerBacking bool var changes []types.PropertyChange for _, c := range spec.ExtraConfig { val := c.GetOptionValue() @@ -419,6 +420,9 @@ func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec) vm.Config.ExtraConfig = append(vm.Config.ExtraConfig, c) } else { if s, ok := val.Value.(string); ok && s == "" { + if key == ContainerBackingOptionKey { + removedContainerBacking = true + } // Remove existing element l := len(vm.Config.ExtraConfig) vm.Config.ExtraConfig[keyIndex] = vm.Config.ExtraConfig[l-1] @@ -450,13 +454,48 @@ func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec) ) } } - if len(changes) != 0 { - Map.Update(vm, changes) - } + // create the container backing before we publish the updates so the simVM is available before handlers + // get triggered + var fault types.BaseMethodFault if 
vm.svm == nil { vm.svm = createSimulationVM(vm) + + // check to see if the VM is already powered on - if so we need to retroactively hit that path here + if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn { + err := vm.svm.start(ctx) + if err != nil { + // don't attempt to undo the changes already made - just return an error + // we'll retry the svm.start operation on pause/restart calls + fault = &types.VAppConfigFault{ + VimFault: types.VimFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}}} + } + } + } else if removedContainerBacking { + err := vm.svm.remove(ctx) + if err == nil { + vm.svm = nil + } else { + // don't attempt to undo the changes already made - just return an error + // we'll retry the svm.start operation on pause/restart calls + fault = &types.VAppConfigFault{ + VimFault: types.VimFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}}} + } + } + + if len(changes) != 0 { + Map.Update(vm, changes) } + + return fault } func validateGuestID(id string) types.BaseMethodFault { @@ -1499,6 +1538,7 @@ func (vm *VirtualMachine) genVmdkPath(p object.DatastorePath) (string, types.Bas func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMachineConfigSpec) types.BaseMethodFault { devices := object.VirtualDeviceList(vm.Config.Hardware.Device) + var err types.BaseMethodFault for i, change := range spec.DeviceChange { dspec := change.GetVirtualDeviceConfigSpec() device := dspec.Device.GetVirtualDevice() @@ -1535,7 +1575,7 @@ func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMach } key := device.Key - err := vm.configureDevice(ctx, devices, dspec, nil) + err = vm.configureDevice(ctx, devices, dspec, nil) if err != nil { return err } @@ -1562,7 +1602,7 @@ func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMach device.DeviceInfo.GetDescription().Summary = "" // regenerate summary } - err := vm.configureDevice(ctx, devices, dspec, oldDevice) + err = vm.configureDevice(ctx, devices, dspec, oldDevice) if err != nil { return err } @@ -1577,9 +1617,16 @@ func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMach {Name: "config.hardware.device", Val: []types.BaseVirtualDevice(devices)}, }) - vm.updateDiskLayouts() + err = vm.updateDiskLayouts() + if err != nil { + return err + } - vm.applyExtraConfig(spec) // Do this after device config, as some may apply to the devices themselves (e.g. ethernet -> guest.net) + // Do this after device config, as some may apply to the devices themselves (e.g. ethernet -> guest.net) + err = vm.applyExtraConfig(ctx, spec) + if err != nil { + return err + } return nil } From 9b2ef98635eaa88a25ee9bdb9c8e1dbaa235b289 Mon Sep 17 00:00:00 2001 From: George Hicken Date: Wed, 2 Aug 2023 00:57:16 +0000 Subject: [PATCH 6/8] vcsim: use docker event for efficient monitoring Makes use of the docker events stream to trigger inspect operations against containers where waiting for things such as IPs. Corrects prior failure to stop the async container watch when the container was removed. Updates to locking to avoid race warnings. Updates vcsim.bats to look for a volume with `--dmi` suffix instead of a volume with the plain container name. 
--- govc/test/vcsim.bats | 2 +- simulator/container.go | 215 +++++++++++++++++++++---- simulator/container_host_system.go | 9 +- simulator/container_virtual_machine.go | 17 +- simulator/host_system.go | 3 +- simulator/virtual_machine.go | 4 + 6 files changed, 209 insertions(+), 41 deletions(-) diff --git a/govc/test/vcsim.bats b/govc/test/vcsim.bats index fad4a40c1..4c9bb850c 100755 --- a/govc/test/vcsim.bats +++ b/govc/test/vcsim.bats @@ -340,7 +340,7 @@ EOF run docker inspect -f '{{.State.Status}}' "$name" assert_success "running" - run docker volume inspect "$name" + run docker volume inspect "$name--dmi" assert_success run govc vm.destroy $vm diff --git a/simulator/container.go b/simulator/container.go index 16294b8e8..f39ef9970 100644 --- a/simulator/container.go +++ b/simulator/container.go @@ -18,7 +18,9 @@ package simulator import ( "archive/tar" + "bufio" "bytes" + "context" "encoding/json" "errors" "fmt" @@ -30,11 +32,13 @@ import ( "path" "regexp" "strings" + "sync" "time" ) var ( - shell = "/bin/sh" + shell = "/bin/sh" + eventWatch eventWatcher ) const ( @@ -48,10 +52,26 @@ func init() { } } +type eventWatcher struct { + sync.Mutex + + stdin io.WriteCloser + stdout io.ReadCloser + process *os.Process + + // watches is a map of container IDs to container objects + watches map[string]*container +} + // container provides methods to manage a container within a simulator VM lifecycle. type container struct { + sync.Mutex + id string name string + + cancelWatch context.CancelFunc + changes chan struct{} } type networkSettings struct { @@ -112,7 +132,6 @@ type tarEntry struct { // From https://docs.docker.com/engine/reference/commandline/cp/ : // > It is not possible to copy certain system files such as resources under /proc, /sys, /dev, tmpfs, and mounts created by the user in the container. // > However, you can still copy such files by manually running tar in docker exec. -// TODO: look at whether this can useful combine with populateVolume for the tar portion or whether the duplication is low enough to make sense func copyToGuest(id string, dest string, length int64, reader io.Reader) error { cmd := exec.Command("docker", "exec", "-i", id, "tar", "Cxf", path.Dir(dest), "-") cmd.Stderr = os.Stderr @@ -190,7 +209,6 @@ func createVolume(volumeName string, labels []string, files []tarEntry) (string, image = "busybox" } - // TODO: do we need to cap name lengths so as not to overflow? name := sanitizeName(volumeName) uid := "" @@ -367,6 +385,7 @@ func create(ctx *Context, name string, id string, networks []string, volumes []s var c container c.name = constructContainerName(name, id) + c.changes = make(chan struct{}) for i := range volumes { // we'll pre-create anonymous volumes, simply for labelling consistency @@ -437,7 +456,11 @@ func (c *container) createVolume(name string, labels []string, files []tarEntry) // * if c.id is empty, or docker returns "No such object", will return an uninitializedContainer error // * err from either execution or parsing of json output func (c *container) inspect() (out []byte, detail containerDetails, err error) { - if c.id == "" { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { err = uninitializedContainer(errors.New("inspect of uninitialized container")) return } @@ -472,7 +495,11 @@ func (c *container) inspect() (out []byte, detail containerDetails, err error) { // start // - if the container already exists, start it or unpause it. 
func (c *container) start(ctx *Context) error { - if c.id == "" { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { return uninitializedContainer(errors.New("start of uninitialized container")) } @@ -497,7 +524,11 @@ func (c *container) start(ctx *Context) error { // pause the container (if any) for the given vm. func (c *container) pause(ctx *Context) error { - if c.id == "" { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { return uninitializedContainer(errors.New("pause of uninitialized container")) } @@ -512,7 +543,11 @@ func (c *container) pause(ctx *Context) error { // restart the container (if any) for the given vm. func (c *container) restart(ctx *Context) error { - if c.id == "" { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { return uninitializedContainer(errors.New("restart of uninitialized container")) } @@ -527,7 +562,11 @@ func (c *container) restart(ctx *Context) error { // stop the container (if any) for the given vm. func (c *container) stop(ctx *Context) error { - if c.id == "" { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { return uninitializedContainer(errors.New("stop of uninitialized container")) } @@ -548,7 +587,11 @@ func (c *container) stop(ctx *Context) error { // * uninitializedContainer error - if c.id is empty // * err from cmd execution func (c *container) exec(ctx *Context, args []string) (string, error) { - if c.id == "" { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { return "", uninitializedContainer(errors.New("exec into uninitialized container")) } @@ -569,6 +612,9 @@ func (c *container) exec(ctx *Context, args []string) (string, error) { // // err - joined err from deletion of container and any volumes or networks that have coupled lifecycle func (c *container) remove(ctx *Context) error { + c.Lock() + defer c.Unlock() + if c.id == "" { // consider absence success return nil @@ -620,49 +666,86 @@ func (c *container) remove(ctx *Context) error { return fmt.Errorf("err: {%s}, lsverr: {%s}, rmverr: {%s}, lsnerr:{%s}, rmerr: {%s}", err, lsverr, rmverr, lsnerr, rmnerr) } + if c.cancelWatch != nil { + c.cancelWatch() + eventWatch.ignore(c) + } c.id = "" return nil } +// updated is a simple trigger allowing a caller to indicate that something has likely changed about the container +// and interested parties should re-inspect as needed. +func (c *container) updated() { + consolidationWindow := 250 * time.Millisecond + if d, err := time.ParseDuration(os.Getenv("VCSIM_EVENT_CONSOLIDATION_WINDOW")); err == nil { + consolidationWindow = d + } + + select { + case c.changes <- struct{}{}: + time.Sleep(consolidationWindow) + // as this is only a hint to avoid waiting for the full inspect interval, we don't care about accumulating + // multiple triggers. We do pause to allow large numbers of sequential updates to consolidate + default: + } +} + // watchContainer monitors the underlying container and updates // properties based on the container status. This occurs until either // the container or the VM is removed. 
// returns: // // err - uninitializedContainer error - if c.id is empty -func (c *container) watchContainer(ctx *Context, updateFn func(*Context, *containerDetails, *container) error) error { +func (c *container) watchContainer(ctx context.Context, updateFn func(*containerDetails, *container) error) error { + c.Lock() + defer c.Unlock() + if c.id == "" { return uninitializedContainer(errors.New("Attempt to watch uninitialized container")) } + eventWatch.watch(c) + + cancelCtx, cancelFunc := context.WithCancel(ctx) + c.cancelWatch = cancelFunc + // Update the VM from the container at regular intervals until the done // channel is closed. go func() { - inspectInterval := time.Duration(5 * time.Second) + inspectInterval := 10 * time.Second if d, err := time.ParseDuration(os.Getenv("VCSIM_INSPECT_INTERVAL")); err == nil { inspectInterval = d } ticker := time.NewTicker(inspectInterval) + update := func() { + _, details, err := c.inspect() + var rmErr error + var removing bool + if _, ok := err.(uninitializedContainer); ok { + removing = true + rmErr = c.remove(SpoofContext()) + } + + updateErr := updateFn(&details, c) + // if we don't succeed we want to re-try + if removing && rmErr == nil && updateErr == nil { + ticker.Stop() + return + } + if updateErr != nil { + log.Printf("vcsim container watch: %s %s", c.id, updateErr) + } + } + for { select { + case <-c.changes: + update() case <-ticker.C: - _, details, err := c.inspect() - var rmErr error - var removing bool - if _, ok := err.(uninitializedContainer); ok { - removing = true - rmErr = c.remove(ctx) - } - - updateErr := updateFn(ctx, &details, c) - // if we don't succeed we want to re-try - if removing && rmErr == nil && updateErr == nil { - ticker.Stop() - return - } - // TODO: log err? - case <-ctx.Done(): + update() + case <-cancelCtx.Done(): return } } @@ -670,3 +753,81 @@ func (c *container) watchContainer(ctx *Context, updateFn func(*Context, *contai return nil } + +func (w *eventWatcher) watch(c *container) { + w.Lock() + defer w.Unlock() + + if w.watches == nil { + w.watches = make(map[string]*container) + } + + w.watches[c.id] = c + + if w.stdin == nil { + cmd := exec.Command("docker", "events", "--format", "'{{.ID}}'", "--filter", "Type=container") + w.stdout, _ = cmd.StdoutPipe() + w.stdin, _ = cmd.StdinPipe() + err := cmd.Start() + if err != nil { + log.Printf("docker event watcher: %s %s", cmd.Args, err) + w.stdin = nil + w.stdout = nil + w.process = nil + + return + } + + w.process = cmd.Process + + go w.monitor() + } +} + +func (w *eventWatcher) ignore(c *container) { + w.Lock() + + delete(w.watches, c.id) + + if len(w.watches) == 0 && w.stdin != nil { + w.stop() + } + + w.Unlock() +} + +func (w *eventWatcher) monitor() { + w.Lock() + watches := len(w.watches) + w.Unlock() + + if watches == 0 { + return + } + + scanner := bufio.NewScanner(w.stdout) + for scanner.Scan() { + id := strings.TrimSpace(scanner.Text()) + + w.Lock() + container := w.watches[id] + w.Unlock() + + if container != nil { + // this is called in a routine to allow an event consolidation window + go container.updated() + } + } +} + +func (w *eventWatcher) stop() { + if w.stdin != nil { + w.stdin.Close() + w.stdin = nil + } + if w.stdout != nil { + w.stdout.Close() + w.stdout = nil + } + w.process.Kill() +} diff --git a/simulator/container_host_system.go b/simulator/container_host_system.go index 59f477b35..41d4fd9a6 100644 --- a/simulator/container_host_system.go +++ b/simulator/container_host_system.go @@ -39,6 +39,7 @@ const PB = 1000 * TB const ( 
advOptPrefixPnicToUnderlayPrefix = "RUN.underlay." advOptContainerBackingImage = "RUN.container" + defaultUnderlayBridgeName = "vcsim-underlay" ) type simHost struct { @@ -133,6 +134,8 @@ func createSimHostMounts(ctx *Context, containerName string, mounts []types.Host // * array of networks to attach to // * array of commands to run // * error +// +// TODO: implement bridge network per DVS - not needed until container backed VMs are "created" on container backed "hosts" func createSimHostNetworks(ctx *Context, containerName string, networkInfo *types.HostNetworkInfo, advOpts *OptionManager) ([]string, [][]string, error) { var dockerNet []string var cmds [][]string @@ -140,8 +143,8 @@ func createSimHostNetworks(ctx *Context, containerName string, networkInfo *type existingNets := make(map[string]string) // a pnic does not have an IP so this is purely a connectivity statement, not a network identity, however this is not how docker works - // so we're going to end up with a veth (our pnic) that does have an IP assigned. - // For now we're going to simply ignore that IP. //TODO: figure out whether we _need_ to do something with it at this point + // so we're going to end up with a veth (our pnic) that does have an IP assigned. That IP will end up being used in a NetConfig structure associated + // with the pNIC. See HostSystem.getNetConfigInterface. for i := range networkInfo.Pnic { pnicName := networkInfo.Pnic[i].Device @@ -357,5 +360,3 @@ var defaultSimVolumes = []types.HostFileSystemMountInfo{ }, }, } - -const defaultUnderlayBridgeName = "vcsim-underlay" diff --git a/simulator/container_virtual_machine.go b/simulator/container_virtual_machine.go index c12a1e1eb..d790ac0c1 100644 --- a/simulator/container_virtual_machine.go +++ b/simulator/container_virtual_machine.go @@ -18,6 +18,7 @@ package simulator import ( "archive/tar" + "context" "encoding/hex" "encoding/json" "errors" @@ -294,14 +295,16 @@ func (svm *simVM) start(ctx *Context) error { log.Printf("%s inspect %s: %s", svm.vm.Name, svm.c.id, err) } - callback := func(ctx *Context, details *containerDetails, c *container) error { + callback := func(details *containerDetails, c *container) error { spoofctx := SpoofContext() - if c.id == "" { - // If the container cannot be found then destroy this VM. - // TODO: figure out if we should pass the vm/container via ctx or otherwise from the callback - this might cause locking issues. + if c.id == "" && svm.vm != nil { + // If the container cannot be found then destroy this VM unless the VM is no longer configured for container backing (svm.vm == nil) taskRef := svm.vm.DestroyTask(spoofctx, &types.Destroy_Task{This: svm.vm.Self}).(*methods.Destroy_TaskBody).Res.Returnval - task := ctx.Map.Get(taskRef).(*Task) + task, ok := spoofctx.Map.Get(taskRef).(*Task) + if !ok { + panic(fmt.Sprintf("couldn't retrieve task for moref %+q while deleting VM %s", taskRef, svm.vm.Name)) + } // Wait for the task to complete and see if there is an error. task.Wait() @@ -317,10 +320,10 @@ func (svm *simVM) start(ctx *Context) error { } // Start watching the container resource. - err = svm.c.watchContainer(ctx, callback) + err = svm.c.watchContainer(context.Background(), callback) if _, ok := err.(uninitializedContainer); ok { // the container has been deleted before we could watch, despite successful launch so clean up. 
- callback(ctx, nil, svm.c) + callback(nil, svm.c) // successful launch so nil the error return nil diff --git a/simulator/host_system.go b/simulator/host_system.go index eb07b6eb4..c52210948 100644 --- a/simulator/host_system.go +++ b/simulator/host_system.go @@ -87,6 +87,7 @@ func NewHostSystem(host mo.HostSystem) *HostSystem { // add a supported option to the AdvancedOption manager simOption := types.OptionDef{ElementDescription: types.ElementDescription{Key: advOptContainerBackingImage}} // TODO: how do we enter patterns here? Or should we stick to a list in the value? + // patterns become necessary if we want to enforce correctness on options for RUN.underlay. or allow RUN.port.xxx hs.Config.OptionDef = append(hs.Config.OptionDef, simOption) config := []struct { @@ -156,8 +157,6 @@ func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connect // // The pnics will be named using standard pattern, ie. vmnic0, vmnic1, ... // This will sanity check the NetConfig for "management" nicType to ensure that it maps through PortGroup->vSwitch->pNIC to vmnic0. -// -// TODO: figure out which other HostVirtualNicSpec's need to be updated with IP, eg. Config.vMotion, Config.Network.vNIC func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mounts []types.HostFileSystemMountInfo, networks ...string) error { option := &types.OptionValue{ Key: advOptContainerBackingImage, diff --git a/simulator/virtual_machine.go b/simulator/virtual_machine.go index ff43a26c5..d6671edff 100644 --- a/simulator/virtual_machine.go +++ b/simulator/virtual_machine.go @@ -478,7 +478,11 @@ func (vm *VirtualMachine) applyExtraConfig(ctx *Context, spec *types.VirtualMach } else if removedContainerBacking { err := vm.svm.remove(ctx) if err == nil { + // remove link from container to VM so callbacks no longer reflect state + vm.svm.vm = nil + // nil container backing reference to return this to a pure in-mem simulated VM vm.svm = nil + } else { // don't attempt to undo the changes already made - just return an error // we'll retry the svm.start operation on pause/restart calls From 2b5c457e155b692eae3ae64762fdee30272094fa Mon Sep 17 00:00:00 2001 From: George Hicken Date: Fri, 14 Jul 2023 05:57:18 -0700 Subject: [PATCH 7/8] vcsim: add interactive debug github action stage Adds a stage to the github actions pipeline that provides an ssh server that allows interactive login to the environment. This only triggers on failure. The reason for adding this is due to repeated failures to find functional arguements for the specific docker version present. Quirks around the format parameter values specifically. 
This is done using the tmate action: https://github.com/mxschmitt/action-tmate Corrects boilerplate --- .github/workflows/govmomi-go-tests.yaml | 3 +++ .github/workflows/govmomi-govc-tests.yaml | 4 ++++ simulator/container_host_system.go | 4 ++-- simulator/container_host_system_test.go | 4 ++-- simulator/container_virtual_machine.go | 4 ++-- simulator/container_virtual_machine_test.go | 4 ++-- 6 files changed, 15 insertions(+), 8 deletions(-) diff --git a/.github/workflows/govmomi-go-tests.yaml b/.github/workflows/govmomi-go-tests.yaml index 3275f231b..43f07871a 100644 --- a/.github/workflows/govmomi-go-tests.yaml +++ b/.github/workflows/govmomi-go-tests.yaml @@ -60,3 +60,6 @@ jobs: TEST_TIMEOUT: 5m TEST_OPTS: "" run: make go-test + - name: Debug with tmate on failure + if: ${{ failure() }} + uses: mxschmitt/action-tmate@v3 diff --git a/.github/workflows/govmomi-govc-tests.yaml b/.github/workflows/govmomi-govc-tests.yaml index e546f5d6f..c1f587cff 100644 --- a/.github/workflows/govmomi-govc-tests.yaml +++ b/.github/workflows/govmomi-govc-tests.yaml @@ -65,6 +65,10 @@ jobs: run: | make ${{ matrix.cmd }} + - name: Debug with tmate on failure + if: ${{ failure() }} + uses: mxschmitt/action-tmate@v3 + govc-docs: name: Verify govc docs are up2date strategy: diff --git a/simulator/container_host_system.go b/simulator/container_host_system.go index 41d4fd9a6..f84f3d840 100644 --- a/simulator/container_host_system.go +++ b/simulator/container_host_system.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2017 VMware, Inc. All Rights Reserved. +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/simulator/container_host_system_test.go b/simulator/container_host_system_test.go index 8709b4bc7..1233bc97b 100644 --- a/simulator/container_host_system_test.go +++ b/simulator/container_host_system_test.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2019 VMware, Inc. All Rights Reserved. +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/simulator/container_virtual_machine.go b/simulator/container_virtual_machine.go index d790ac0c1..a2b91fd86 100644 --- a/simulator/container_virtual_machine.go +++ b/simulator/container_virtual_machine.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2018 VMware, Inc. All Rights Reserved. +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, diff --git a/simulator/container_virtual_machine_test.go b/simulator/container_virtual_machine_test.go index e36782a66..af7419a77 100644 --- a/simulator/container_virtual_machine_test.go +++ b/simulator/container_virtual_machine_test.go @@ -1,11 +1,11 @@ /* -Copyright (c) 2023 VMware, Inc. All Rights Reserved. +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - http://www.apache.org/licenses/LICENSE-2.0 +http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, From f636e960f599dd4ddc3622bf3b1f7f82618488ac Mon Sep 17 00:00:00 2001 From: George Hicken Date: Mon, 7 Aug 2023 16:17:33 +0000 Subject: [PATCH 8/8] Address review comments I expect to squash this into an earlier commit once it passes tests vcsim: support container backing for hosts --- simulator/container_host_system.go | 25 ++++--------- simulator/container_virtual_machine_test.go | 14 ++++++-- simulator/esx/host_config_filesystemvolume.go | 35 +++++++------------ simulator/host_system.go | 2 +- 4 files changed, 33 insertions(+), 43 deletions(-) diff --git a/simulator/container_host_system.go b/simulator/container_host_system.go index f84f3d840..c3d283abb 100644 --- a/simulator/container_host_system.go +++ b/simulator/container_host_system.go @@ -20,22 +20,11 @@ import ( "fmt" "strings" + "github.com/vmware/govmomi/units" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/types" ) -const KiB = 1024 -const MiB = 1024 * KiB -const GiB = 1024 * MiB -const TiB = 1024 * GiB -const Pib = 1024 * TiB - -const KB = 1000 -const MB = 1000 * KB -const GB = 1000 * MB -const TB = 1000 * GB -const PB = 1000 * TB - const ( advOptPrefixPnicToUnderlayPrefix = "RUN.underlay." 
advOptContainerBackingImage = "RUN.container" @@ -74,8 +63,8 @@ func createSimHostMounts(ctx *Context, containerName string, mounts []types.Host switch vol := info.Volume.(type) { case *types.HostVmfsVolume: vol.BlockSizeMb = 1 - vol.BlockSize = KiB - vol.UnmapGranularity = KiB + vol.BlockSize = units.KB + vol.UnmapGranularity = units.KB vol.UnmapPriority = "low" vol.MajorVersion = 6 vol.Version = "6.82" @@ -309,7 +298,7 @@ var defaultSimVolumes = []types.HostFileSystemMountInfo{ HostFileSystemVolume: types.HostFileSystemVolume{ Type: "VMFS", Name: "datastore1", - Capacity: 1 * TiB, + Capacity: 1 * units.TB, }, Extent: []types.HostScsiDiskPartition{ { @@ -326,7 +315,7 @@ var defaultSimVolumes = []types.HostFileSystemMountInfo{ HostFileSystemVolume: types.HostFileSystemVolume{ Type: "OTHER", Name: "OSDATA-%__UUID__%", - Capacity: 128 * GiB, + Capacity: 128 * units.GB, }, Extent: []types.HostScsiDiskPartition{ { @@ -343,7 +332,7 @@ var defaultSimVolumes = []types.HostFileSystemMountInfo{ HostFileSystemVolume: types.HostFileSystemVolume{ Type: "OTHER", Name: "BOOTBANK1", - Capacity: 4 * GiB, + Capacity: 4 * units.GB, }, }, }, @@ -355,7 +344,7 @@ var defaultSimVolumes = []types.HostFileSystemMountInfo{ HostFileSystemVolume: types.HostFileSystemVolume{ Type: "OTHER", Name: "BOOTBANK2", - Capacity: 4 * GiB, + Capacity: 4 * units.GB, }, }, }, diff --git a/simulator/container_virtual_machine_test.go b/simulator/container_virtual_machine_test.go index af7419a77..de32bd375 100644 --- a/simulator/container_virtual_machine_test.go +++ b/simulator/container_virtual_machine_test.go @@ -27,6 +27,8 @@ import ( "strings" "testing" + "github.com/stretchr/testify/require" + "github.com/vmware/govmomi/find" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/vim25" @@ -39,14 +41,22 @@ import ( // port - the port to forward to the container port 80 func constructNginxBacking(t *testing.T, content string, port int) []types.BaseOptionValue { dir := t.TempDir() + // experience shows that a parent directory created as part of the TempDir call may not have + // o+rx, preventing use within a container that doesn't have the same uid for dirpart := dir; dirpart != "/"; dirpart = filepath.Dir(dirpart) { os.Chmod(dirpart, 0755) + stat, err := os.Stat(dirpart) + require.Nil(t, err, "must be able to check file and directory permissions") + require.NotZero(t, stat.Mode()&0005, "does not have o+rx permissions", dirpart) } fpath := filepath.Join(dir, "index.html") - os.WriteFile(fpath, []byte(content), 0644) + err := os.WriteFile(fpath, []byte(content), 0644) + require.Nil(t, err, "Expected to cleanly write content to file: %s", err) + // just in case umask gets in the way - os.Chmod(fpath, 0644) + err = os.Chmod(fpath, 0644) + require.Nil(t, err, "Expected to cleanly set file permissions on content: %s", err) args := fmt.Sprintf("-v '%s:/usr/share/nginx/html:ro' nginx", dir) diff --git a/simulator/esx/host_config_filesystemvolume.go b/simulator/esx/host_config_filesystemvolume.go index 578fc5f64..01c62d0a4 100644 --- a/simulator/esx/host_config_filesystemvolume.go +++ b/simulator/esx/host_config_filesystemvolume.go @@ -16,19 +16,10 @@ limitations under the License. 
package esx -import "github.com/vmware/govmomi/vim25/types" - -const KiB = 1024 -const MiB = 1024 * KiB -const GiB = 1024 * MiB -const TiB = 1024 * GiB -const Pib = 1024 * TiB - -const KB = 1000 -const MB = 1000 * KB -const GB = 1000 * MB -const TB = 1000 * GB -const PB = 1000 * TB +import ( + "github.com/vmware/govmomi/units" + "github.com/vmware/govmomi/vim25/types" +) // HostConfigInfo is the default template for the HostSystem config property. // Capture method: @@ -50,14 +41,14 @@ var HostFileSystemVolumeInfo = types.HostFileSystemVolumeInfo{ HostFileSystemVolume: types.HostFileSystemVolume{ Type: "VMFS", Name: "datastore1", - Capacity: 3.5 * TiB, + Capacity: 3.5 * units.TB, }, BlockSizeMb: 1, - BlockSize: KiB, - UnmapGranularity: KiB, + BlockSize: units.KB, + UnmapGranularity: units.KB, UnmapPriority: "low", UnmapBandwidthSpec: (*types.VmfsUnmapBandwidthSpec)(nil), - MaxBlocks: 61 * MiB, + MaxBlocks: 61 * units.MB, MajorVersion: 6, Version: "6.82", Uuid: "deadbeef-01234567-89ab-cdef00000003", @@ -88,14 +79,14 @@ var HostFileSystemVolumeInfo = types.HostFileSystemVolumeInfo{ HostFileSystemVolume: types.HostFileSystemVolume{ Type: "OTHER", Name: "OSDATA-deadbeef-01234567-89ab-cdef00000002", - Capacity: 128 * GiB, + Capacity: 128 * units.GB, }, BlockSizeMb: 1, - BlockSize: KiB, + BlockSize: units.KB, UnmapGranularity: 0, UnmapPriority: "", UnmapBandwidthSpec: (*types.VmfsUnmapBandwidthSpec)(nil), - MaxBlocks: 256 * KiB, + MaxBlocks: 256 * units.KB, MajorVersion: 1, Version: "1.00", Uuid: "deadbeef-01234567-89ab-cdef00000002", @@ -126,7 +117,7 @@ var HostFileSystemVolumeInfo = types.HostFileSystemVolumeInfo{ HostFileSystemVolume: types.HostFileSystemVolume{ Type: "OTHER", Name: "BOOTBANK1", - Capacity: 4 * GiB, + Capacity: 4 * units.GB, }, }, VStorageSupport: "", @@ -144,7 +135,7 @@ var HostFileSystemVolumeInfo = types.HostFileSystemVolumeInfo{ HostFileSystemVolume: types.HostFileSystemVolume{ Type: "OTHER", Name: "BOOTBANK2", - Capacity: 4 * GiB, + Capacity: 4 * units.GB, }, }, VStorageSupport: "", diff --git a/simulator/host_system.go b/simulator/host_system.go index c52210948..f8cd3fe7c 100644 --- a/simulator/host_system.go +++ b/simulator/host_system.go @@ -163,7 +163,7 @@ func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mount Value: image, } - advOpts := Map.Get(h.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + advOpts := ctx.Map.Get(h.ConfigManager.AdvancedOption.Reference()).(*OptionManager) fault := advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() if fault != nil { panic(fault)
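
A closing note on the hunk above: resolving the OptionManager through
ctx.Map rather than the package-level Map binds the lookup to the
simulator instance servicing the request, which avoids cross-talk when
more than one simulator instance runs in the same process. A minimal
sketch of the pattern, assuming it lives in the simulator package
alongside the types shown above (hostOptionManager is an illustrative
helper, not part of the change):

    // Resolve the host's OptionManager through the request-scoped
    // registry rather than the shared package-level Map.
    func hostOptionManager(ctx *Context, h *HostSystem) *OptionManager {
    	ref := h.ConfigManager.AdvancedOption.Reference()
    	return ctx.Map.Get(ref).(*OptionManager)
    }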