diff --git a/.github/workflows/govmomi-go-tests.yaml b/.github/workflows/govmomi-go-tests.yaml index 3275f231b..43f07871a 100644 --- a/.github/workflows/govmomi-go-tests.yaml +++ b/.github/workflows/govmomi-go-tests.yaml @@ -60,3 +60,6 @@ jobs: TEST_TIMEOUT: 5m TEST_OPTS: "" run: make go-test + - name: Debug with tmate on failure + if: ${{ failure() }} + uses: mxschmitt/action-tmate@v3 diff --git a/.github/workflows/govmomi-govc-tests.yaml b/.github/workflows/govmomi-govc-tests.yaml index e546f5d6f..c1f587cff 100644 --- a/.github/workflows/govmomi-govc-tests.yaml +++ b/.github/workflows/govmomi-govc-tests.yaml @@ -65,6 +65,10 @@ jobs: run: | make ${{ matrix.cmd }} + - name: Debug with tmate on failure + if: ${{ failure() }} + uses: mxschmitt/action-tmate@v3 + govc-docs: name: Verify govc docs are up2date strategy: diff --git a/Makefile b/Makefile index bd287802a..0f31649af 100644 --- a/Makefile +++ b/Makefile @@ -136,7 +136,7 @@ endif .PHONY: go-test go-test: ## Runs go unit tests with race detector enabled - GORACE=$(GORACE) $(GO) test \ + GORACE=$(GORACE) CGO_ENABLED=1 $(GO) test \ -count $(TEST_COUNT) \ -race \ -timeout $(TEST_TIMEOUT) \ diff --git a/govc/test/vcsim.bats b/govc/test/vcsim.bats index fad4a40c1..4c9bb850c 100755 --- a/govc/test/vcsim.bats +++ b/govc/test/vcsim.bats @@ -340,7 +340,7 @@ EOF run docker inspect -f '{{.State.Status}}' "$name" assert_success "running" - run docker volume inspect "$name" + run docker volume inspect "$name--dmi" assert_success run govc vm.destroy $vm diff --git a/simulator/cluster_compute_resource.go b/simulator/cluster_compute_resource.go index f86fa7ed4..abe001665 100644 --- a/simulator/cluster_compute_resource.go +++ b/simulator/cluster_compute_resource.go @@ -66,7 +66,7 @@ func (add *addHost) Run(task *Task) (types.AnyType, types.BaseMethodFault) { } host := NewHostSystem(template) - host.configure(spec, add.req.AsConnected) + host.configure(task.ctx, spec, add.req.AsConnected) task.ctx.Map.PutEntity(cr, task.ctx.Map.NewEntity(host)) host.Summary.Host = &host.Self diff --git a/simulator/container.go b/simulator/container.go index fec1c0f48..f39ef9970 100644 --- a/simulator/container.go +++ b/simulator/container.go @@ -18,29 +18,32 @@ package simulator import ( "archive/tar" + "bufio" "bytes" - "encoding/hex" + "context" "encoding/json" + "errors" "fmt" "io" "log" - "net/http" + "net" "os" "os/exec" "path" "regexp" - "strconv" "strings" + "sync" "time" - - "github.com/google/uuid" - - "github.com/vmware/govmomi/vim25/methods" - "github.com/vmware/govmomi/vim25/types" ) var ( - shell = "/bin/sh" + shell = "/bin/sh" + eventWatch eventWatcher +) + +const ( + deleteWithContainer = "lifecycle=container" + createdByVcsim = "createdBy=vcsim" ) func init() { @@ -49,10 +52,26 @@ func init() { } } +type eventWatcher struct { + sync.Mutex + + stdin io.WriteCloser + stdout io.ReadCloser + process *os.Process + + // watches is a map of container IDs to container objects + watches map[string]*container +} + // container provides methods to manage a container within a simulator VM lifecycle. type container struct { + sync.Mutex + id string name string + + cancelWatch context.CancelFunc + changes chan struct{} } type networkSettings struct { @@ -62,551 +81,753 @@ type networkSettings struct { MacAddress string } -// inspect applies container network settings to vm.Guest properties. 
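

The `container` type above now carries a `changes` channel and a `cancelWatch` hook, which the `updated()` and `watchContainer()` methods further down use for event-driven refresh. A minimal, self-contained sketch of that lossy, non-blocking notification pattern (all names here are illustrative, not part of this patch):

```go
package main

import (
	"fmt"
	"time"
)

// hint signals "something changed" without ever blocking the sender.
// If no listener is ready, the hint is dropped; a periodic poll is
// assumed to pick up anything missed.
func hint(changes chan struct{}) {
	select {
	case changes <- struct{}{}:
		// brief pause so a burst of events consolidates into one wake-up
		time.Sleep(50 * time.Millisecond)
	default:
	}
}

func main() {
	changes := make(chan struct{}) // unbuffered, as in the patch

	go func() {
		for range changes {
			fmt.Println("re-inspect container state")
		}
	}()

	for i := 0; i < 5; i++ {
		hint(changes) // five events typically collapse into fewer wake-ups
	}
	time.Sleep(200 * time.Millisecond)
}
```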
-func (c *container) inspect(vm *VirtualMachine) error { - if c.id == "" { - return nil +type containerDetails struct { + State struct { + Running bool + Paused bool } + NetworkSettings struct { + networkSettings + Networks map[string]networkSettings + } +} - var objects []struct { - State struct { - Running bool - Paused bool - } - NetworkSettings struct { - networkSettings - Networks map[string]networkSettings - } +type unknownContainer error +type uninitializedContainer error + +var sanitizeNameRx = regexp.MustCompile(`[\(\)\s]`) + +func sanitizeName(name string) string { + return sanitizeNameRx.ReplaceAllString(name, "-") +} + +func constructContainerName(name, uid string) string { + return fmt.Sprintf("vcsim-%s-%s", sanitizeName(name), uid) +} + +func constructVolumeName(containerName, uid, volumeName string) string { + return constructContainerName(containerName, uid) + "--" + sanitizeName(volumeName) +} + +func extractNameAndUid(containerName string) (name string, uid string, err error) { + parts := strings.Split(strings.TrimPrefix(containerName, "vcsim-"), "-") + if len(parts) != 2 { + err = fmt.Errorf("container name does not match expected vcsim-name-uid format: %s", containerName) + return } - cmd := exec.Command("docker", "inspect", c.id) - out, err := cmd.Output() + return parts[0], parts[1], nil +} + +func prefixToMask(prefix int) string { + mask := net.CIDRMask(prefix, 32) + return fmt.Sprintf("%d.%d.%d.%d", mask[0], mask[1], mask[2], mask[3]) +} + +type tarEntry struct { + header *tar.Header + content []byte +} + +// From https://docs.docker.com/engine/reference/commandline/cp/ : +// > It is not possible to copy certain system files such as resources under /proc, /sys, /dev, tmpfs, and mounts created by the user in the container. +// > However, you can still copy such files by manually running tar in docker exec. 
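

The `docker cp` limitation quoted above is why the new helpers that follow (`copyToGuest`, `copyFromGuest`, `createVolume`) all stream tar archives through `docker exec`/`docker run` instead. A hypothetical in-package usage of two of them — the container ID, paths and contents are illustrative only:

```go
package simulator

import (
	"archive/tar"
	"bytes"
)

// exampleCopyToGuest streams one small file into a running container
// over the tar-on-stdin path, sidestepping `docker cp` restrictions.
func exampleCopyToGuest(containerID string) error {
	payload := []byte("hello=world\n")
	return copyToGuest(containerID, "/etc/example.conf",
		int64(len(payload)), bytes.NewReader(payload))
}

// exampleCreateVolume creates a labelled volume pre-populated with one
// file; createVolume defaults Size, Mode and ModTime when omitted.
func exampleCreateVolume() (string, error) {
	files := []tarEntry{{
		header:  &tar.Header{Name: "motd"},
		content: []byte("backed by vcsim\n"),
	}}
	return createVolume("example-vol", []string{createdByVcsim}, files)
}
```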
+func copyToGuest(id string, dest string, length int64, reader io.Reader) error { + cmd := exec.Command("docker", "exec", "-i", id, "tar", "Cxf", path.Dir(dest), "-") + cmd.Stderr = os.Stderr + stdin, err := cmd.StdinPipe() if err != nil { return err } - if err = json.NewDecoder(bytes.NewReader(out)).Decode(&objects); err != nil { + + err = cmd.Start() + if err != nil { return err } - vm.Config.Annotation = strings.Join(cmd.Args, " ") - vm.logPrintf("%s: %s", vm.Config.Annotation, string(out)) - - for _, o := range objects { - s := o.NetworkSettings.networkSettings + tw := tar.NewWriter(stdin) + _ = tw.WriteHeader(&tar.Header{ + Name: path.Base(dest), + Size: length, + Mode: 0444, + ModTime: time.Now(), + }) - for _, n := range o.NetworkSettings.Networks { - s = n - break - } + _, err = io.Copy(tw, reader) - if o.State.Paused { - vm.Runtime.PowerState = types.VirtualMachinePowerStateSuspended - } else if o.State.Running { - vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOn - } else { - vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff - } + twErr := tw.Close() + stdinErr := stdin.Close() - vm.Guest.IpAddress = s.IPAddress - vm.Summary.Guest.IpAddress = s.IPAddress - - if len(vm.Guest.Net) != 0 { - net := &vm.Guest.Net[0] - net.IpAddress = []string{s.IPAddress} - net.MacAddress = s.MacAddress - net.IpConfig = &types.NetIpConfigInfo{ - IpAddress: []types.NetIpConfigInfoIpAddress{{ - IpAddress: s.IPAddress, - PrefixLength: int32(s.IPPrefixLen), - State: string(types.NetIpConfigInfoIpAddressStatusPreferred), - }}, - } - } + waitErr := cmd.Wait() - for _, d := range vm.Config.Hardware.Device { - if eth, ok := d.(types.BaseVirtualEthernetCard); ok { - eth.GetVirtualEthernetCard().MacAddress = s.MacAddress - break - } - } + if err != nil || twErr != nil || stdinErr != nil || waitErr != nil { + return fmt.Errorf("copy: {%s}, tw: {%s}, stdin: {%s}, wait: {%s}", err, twErr, stdinErr, waitErr) } return nil } -func (c *container) prepareGuestOperation( - vm *VirtualMachine, - auth types.BaseGuestAuthentication) types.BaseMethodFault { - - if c.id == "" { - return new(types.GuestOperationsUnavailable) +func copyFromGuest(id string, src string, sink func(int64, io.Reader) error) error { + cmd := exec.Command("docker", "exec", id, "tar", "Ccf", path.Dir(src), "-", path.Base(src)) + cmd.Stderr = os.Stderr + stdout, err := cmd.StdoutPipe() + if err != nil { + return err } - if vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn { - return &types.InvalidPowerState{ - RequestedState: types.VirtualMachinePowerStatePoweredOn, - ExistingState: vm.Runtime.PowerState, - } + if err = cmd.Start(); err != nil { + return err } - switch creds := auth.(type) { - case *types.NamePasswordAuthentication: - if creds.Username == "" || creds.Password == "" { - return new(types.InvalidGuestLogin) - } - default: - return new(types.InvalidGuestLogin) + + tr := tar.NewReader(stdout) + header, err := tr.Next() + if err != nil { + return err } - return nil -} -var sanitizeNameRx = regexp.MustCompile(`[\(\)\s]`) + err = sink(header.Size, tr) + waitErr := cmd.Wait() -func sanitizeName(name string) string { - return sanitizeNameRx.ReplaceAllString(name, "-") + if err != nil || waitErr != nil { + return fmt.Errorf("err: {%s}, wait: {%s}", err, waitErr) + } + + return nil } -// createDMI writes BIOS UUID DMI files to a container volume -func (c *container) createDMI(vm *VirtualMachine, name string) error { +// createVolume creates a volume populated with the provided files +// If the 
header.Size is omitted or set to zero, then len(content) is used.
+// Docker appears to treat this volume create command as idempotent so long as it's identical
+// to an existing volume, so we can use this both for creating volumes inline in container create (for labelling) and
+// for population after.
+// returns:
+//
+//	uid - string
+//	err - error or nil
+func createVolume(volumeName string, labels []string, files []tarEntry) (string, error) {
 	image := os.Getenv("VCSIM_BUSYBOX")
 	if image == "" {
 		image = "busybox"
 	}

-	cmd := exec.Command("docker", "run", "--rm", "-i", "-v", name+":"+"/"+name, image, "tar", "-C", "/"+name, "-xf", "-")
+	name := sanitizeName(volumeName)
+	uid := ""
+
+	// label the volume if specified - this requires the volume be created before use
+	if len(labels) > 0 {
+		run := []string{"volume", "create"}
+		for i := range labels {
+			run = append(run, "--label", labels[i])
+		}
+		run = append(run, name)
+		cmd := exec.Command("docker", run...)
+		out, err := cmd.Output()
+		if err != nil {
+			return "", err
+		}
+		uid = strings.TrimSpace(string(out))
+
+		if name == "" {
+			name = uid
+		}
+	}
+
+	run := []string{"run", "--rm", "-i"}
+	run = append(run, "-v", name+":/"+name)
+	run = append(run, image, "tar", "-C", "/"+name, "-xf", "-")
+	cmd := exec.Command("docker", run...)
 	stdin, err := cmd.StdinPipe()
 	if err != nil {
-		return err
+		return uid, err
 	}

 	err = cmd.Start()
 	if err != nil {
-		return err
+		return uid, err
 	}

 	tw := tar.NewWriter(stdin)

-	dmi := []struct {
-		name string
-		val  func(uuid.UUID) string
-	}{
-		{"product_uuid", productUUID},
-		{"product_serial", productSerial},
-	}
+	for _, file := range files {
+		header := file.header
+
+		if header.Size == 0 && len(file.content) > 0 {
+			header.Size = int64(len(file.content))
+		}
+
+		if header.ModTime.IsZero() {
+			header.ModTime = time.Now()
+		}

-	for _, file := range dmi {
-		val := file.val(vm.uid)
-		_ = tw.WriteHeader(&tar.Header{
-			Name:    file.name,
-			Size:    int64(len(val) + 1),
-			Mode:    0444,
-			ModTime: time.Now(),
-		})
-		_, _ = fmt.Fprintln(tw, val)
+		if header.Mode == 0 {
+			header.Mode = 0444
+		}
+
+		tarErr := tw.WriteHeader(header)
+		if tarErr == nil {
+			_, tarErr = tw.Write(file.content)
+		}
 	}

-	_ = tw.Close()
-	_ = stdin.Close()
+	err = nil
+	twErr := tw.Close()
+	stdinErr := stdin.Close()
+	if twErr != nil || stdinErr != nil {
+		err = fmt.Errorf("tw: {%s}, stdin: {%s}", twErr, stdinErr)
+	}

-	if err := cmd.Wait(); err != nil {
+	if waitErr := cmd.Wait(); waitErr != nil {
 		stderr := ""
-		if xerr, ok := err.(*exec.ExitError); ok {
+		if xerr, ok := waitErr.(*exec.ExitError); ok {
 			stderr = string(xerr.Stderr)
 		}
-		log.Printf("%s %s: %s %s", vm.Name, cmd.Args, err, stderr)
-		return err
+		log.Printf("%s %s: %s %s", name, cmd.Args, waitErr, stderr)
+
+		err = fmt.Errorf("%s, wait: {%s}", err, waitErr)
+		return uid, err
 	}

-	return nil
+	return uid, err
 }

-var (
-	toolsRunning = []types.PropertyChange{
-		{Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsOk},
-		{Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsRunning)},
+func getBridge(bridgeName string) (string, error) {
+	// {"CreatedAt":"2023-07-11 19:22:25.45027052 +0000 UTC","Driver":"bridge","ID":"fe52c7502c5d","IPv6":"false","Internal":"false","Labels":"goodbye=,hello=","Name":"testnet","Scope":"local"}
+	// podman has distinctly different fields at v4.4.1 so commented out fields that don't match. 
We only actually care about ID + type bridgeNet struct { + // CreatedAt string + Driver string + ID string + // IPv6 string + // Internal string + // Labels string + Name string + // Scope string + } + + // if the underlay bridge already exists, return that + // we don't check for a specific label or similar so that it's possible to use a bridge created by other frameworks for composite testing + var bridge bridgeNet + cmd := exec.Command("docker", "network", "ls", "--format={{json .}}", "-f", fmt.Sprintf("name=%s$", bridgeName)) + out, err := cmd.Output() + if err != nil { + log.Printf("vcsim %s: %s, %s", cmd.Args, err, out) + return "", err } - toolsNotRunning = []types.PropertyChange{ - {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsNotRunning}, - {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsNotRunning)}, + // unfortunately docker returns an empty string not an empty json doc and podman returns '[]' + // podman also returns an array of matches even when there's only one, so we normalize. + str := strings.TrimSpace(string(out)) + str = strings.TrimPrefix(str, "[") + str = strings.TrimSuffix(str, "]") + if len(str) == 0 { + return "", nil } -) -// start runs the container if specified by the RUN.container extraConfig property. -func (c *container) start(ctx *Context, vm *VirtualMachine) { - if c.id != "" { - start := "start" - if vm.Runtime.PowerState == types.VirtualMachinePowerStateSuspended { - start = "unpause" - } - cmd := exec.Command("docker", start, c.id) - err := cmd.Run() - if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsRunning) - } - return + err = json.Unmarshal([]byte(str), &bridge) + if err != nil { + log.Printf("vcsim %s: %s, %s", cmd.Args, err, str) + return "", err } - var args []string - var env []string - mountDMI := true - ports := make(map[string]string) + return bridge.ID, nil +} - for _, opt := range vm.Config.ExtraConfig { - val := opt.GetOptionValue() - if val.Key == "RUN.container" { - run := val.Value.(string) - err := json.Unmarshal([]byte(run), &args) - if err != nil { - args = []string{run} - } +// createBridge creates a bridge network if one does not already exist +// returns: +// +// uid - string +// err - error or nil +func createBridge(bridgeName string, labels ...string) (string, error) { - continue - } - if val.Key == "RUN.mountdmi" { - var mount bool - err := json.Unmarshal([]byte(val.Value.(string)), &mount) - if err == nil { - mountDMI = mount - } - } - if strings.HasPrefix(val.Key, "RUN.port.") { - sKey := strings.Split(val.Key, ".") - containerPort := sKey[len(sKey)-1] - ports[containerPort] = val.Value.(string) - } - if strings.HasPrefix(val.Key, "RUN.env.") { - sKey := strings.Split(val.Key, ".") - envKey := sKey[len(sKey)-1] - env = append(env, "--env", fmt.Sprintf("%s=%s", envKey, val.Value.(string))) - } - if strings.HasPrefix(val.Key, "guestinfo.") { - key := strings.Replace(strings.ToUpper(val.Key), ".", "_", -1) - env = append(env, "--env", fmt.Sprintf("VMX_%s=%s", key, val.Value.(string))) - } + id, err := getBridge(bridgeName) + if err != nil { + return "", err } - if len(args) == 0 { - return + if id != "" { + return id, nil } - if len(env) != 0 { - // Configure env as the data access method for cloud-init-vmware-guestinfo - env = append(env, "--env", "VMX_GUESTINFO=true") + + run := []string{"network", "create", "--label", createdByVcsim} + for i := range labels { + run = append(run, "--label", labels[i]) } - 
if len(ports) != 0 {
-		// Publish the specified container ports
-		for containerPort, hostPort := range ports {
-			env = append(env, "-p", fmt.Sprintf("%s:%s", hostPort, containerPort))
-		}
+	run = append(run, bridgeName)
+
+	cmd := exec.Command("docker", run...)
+	out, err := cmd.Output()
+	if err != nil {
+		log.Printf("vcsim %s: %s: %s", cmd.Args, out, err)
+		return "", err
 	}

-	c.name = fmt.Sprintf("vcsim-%s-%s", sanitizeName(vm.Name), vm.uid)
-	run := append([]string{"docker", "run", "-d", "--name", c.name}, env...)
+	// docker returns the ID regardless of whether you supply a name when creating the network, however
+	// podman returns the pretty name, so we have to normalize
+	id, err = getBridge(bridgeName)
+	if err != nil {
+		return "", err
+	}

-	if mountDMI {
-		if err := c.createDMI(vm, c.name); err != nil {
-			return
-		}
-		run = append(run, "-v", fmt.Sprintf("%s:%s:ro", c.name, "/sys/class/dmi/id"))
+	return id, nil
+}
+
+// create
+// - name - pretty name, eg. vm name
+// - id - uuid or similar - this is merged into container name rather than dictating containerID
+// - networks - set of bridges to connect the container to
+// - volumes - colon separated tuple of volume name to mount path. Passed directly to docker via -v so mount options can be postfixed.
+// - env - array of environment variables in name=value form
+// - optsAndImage - pass-through options and must include at least the container image to use, including tag if necessary
+// - args - the command+args to pass to the container
+func create(ctx *Context, name string, id string, networks []string, volumes []string, ports []string, env []string, image string, args []string) (*container, error) {
+	if len(image) == 0 {
+		return nil, errors.New("cannot create container backing without an image")
+	}
+
+	var c container
+	c.name = constructContainerName(name, id)
+	c.changes = make(chan struct{})
+
+	for i := range volumes {
+		// we'll pre-create anonymous volumes, simply for labelling consistency
+		volName := strings.Split(volumes[i], ":")
+		createVolume(volName[0], []string{deleteWithContainer, "container=" + c.name}, nil)
+	}
+
+	// assemble env
+	var dockerNet []string
+	var dockerVol []string
+	var dockerPort []string
+	var dockerEnv []string
+
+	for i := range env {
+		dockerEnv = append(dockerEnv, "--env", env[i])
+	}
+
+	for i := range volumes {
+		dockerVol = append(dockerVol, "-v", volumes[i])
+	}
+
+	for i := range ports {
+		dockerPort = append(dockerPort, "-p", ports[i])
+	}
+
+	for i := range networks {
+		dockerNet = append(dockerNet, "--network", networks[i])
+	}
+
+	run := []string{"docker", "create", "--name", c.name}
+	run = append(run, dockerNet...)
+	run = append(run, dockerVol...)
+	run = append(run, dockerPort...)
+	run = append(run, dockerEnv...)
+	run = append(run, image)
+	run = append(run, args...)
+
+	// this combines all the run options into a single string that's passed to /bin/bash -c as the single argument to force bash parsing. 
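

Because the assembled argv is joined into one string and handed to `/bin/sh -c`, any argument containing spaces or shell metacharacters must already be quoted by the caller — which is what the TODO below is about. A hypothetical call showing how `create`'s option groups line up (every value is illustrative):

```go
package simulator

// exampleCreate sketches a caller of create; note the port tuple is
// host:container, matching what docker's -p flag expects.
func exampleCreate(ctx *Context) (*container, error) {
	return create(ctx,
		"my-vm", "a1b2c3", // pretty name + uid -> vcsim-my-vm-a1b2c3
		[]string{"vcsim-underlay"}, // --network
		[]string{"data:/data"},     // -v
		[]string{"8080:80"},        // -p
		[]string{"GUESTINFO=1"},    // --env
		"nginx", nil)               // image and optional command+args
}
```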
+ // TODO: make this configurable behaviour so users also have the option of not escaping everything for bash + cmd := exec.Command(shell, "-c", strings.Join(run, " ")) out, err := cmd.Output() if err != nil { stderr := "" if xerr, ok := err.(*exec.ExitError); ok { stderr = string(xerr.Stderr) } - log.Printf("%s %s: %s %s", vm.Name, cmd.Args, err, stderr) - return + log.Printf("%s %s: %s %s", name, cmd.Args, err, stderr) + + return nil, err } - ctx.Map.Update(vm, toolsRunning) c.id = strings.TrimSpace(string(out)) - vm.logPrintf("%s %s: %s", cmd.Path, cmd.Args, c.id) - if err = c.inspect(vm); err != nil { - log.Printf("%s inspect %s: %s", vm.Name, c.id, err) - } - - // Start watching the container resource. - go c.watchContainer(vm) + return &c, nil } -// watchContainer monitors the underlying container and updates the VM -// properties based on the container status. This occurs until either -// the container or the VM is removed. -func (c *container) watchContainer(vm *VirtualMachine) { +// createVolume takes the specified files and writes them into a volume named for the container. +func (c *container) createVolume(name string, labels []string, files []tarEntry) (string, error) { + return createVolume(c.name+"--"+name, append(labels, "container="+c.name), files) +} - inspectInterval := time.Duration(5 * time.Second) - if d, err := time.ParseDuration(os.Getenv("VCSIM_INSPECT_INTERVAL")); err == nil { - inspectInterval = d +// inspect retrieves and parses container properties into directly usable struct +// returns: +// +// out - the stdout of the command +// detail - basic struct populated with container details +// err: +// * if c.id is empty, or docker returns "No such object", will return an uninitializedContainer error +// * err from either execution or parsing of json output +func (c *container) inspect() (out []byte, detail containerDetails, err error) { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + err = uninitializedContainer(errors.New("inspect of uninitialized container")) + return } - var ( - ctx = SpoofContext() - done = make(chan struct{}) - ticker = time.NewTicker(inspectInterval) - ) + var details []containerDetails - stopUpdatingVmFromContainer := func() { - ticker.Stop() - close(done) + cmd := exec.Command("docker", "inspect", c.id) + out, err = cmd.Output() + if eErr, ok := err.(*exec.ExitError); ok { + if strings.Contains(string(eErr.Stderr), "No such object") { + err = uninitializedContainer(errors.New("inspect of uninitialized container")) + } } - destroyVm := func() { - // If the container cannot be found then destroy this VM. - taskRef := vm.DestroyTask(ctx, &types.Destroy_Task{ - This: vm.Self, - }).(*methods.Destroy_TaskBody).Res.Returnval - task := ctx.Map.Get(taskRef).(*Task) + if err != nil { + return + } - // Wait for the task to complete and see if there is an error. - task.Wait() - if task.Info.Error != nil { - vm.logPrintf("failed to destroy vm: err=%v", *task.Info.Error) - } + if err = json.NewDecoder(bytes.NewReader(out)).Decode(&details); err != nil { + return } - updateVmFromContainer := func() { - // Exit the monitor loop if the VM was removed from the API side. - if c.id == "" { - stopUpdatingVmFromContainer() - return - } + if len(details) != 1 { + err = fmt.Errorf("multiple containers (%d) match ID: %s", len(details), c.id) + return + } - if err := c.inspect(vm); err != nil { - // If there is an error inspecting the container because it no - // longer exists, then destroy the VM as well. 
Please note the - // reason this logic does not invoke stopUpdatingVmFromContainer - // is because that will be handled the next time this function - // is entered and c.id is empty. - if err, ok := err.(*exec.ExitError); ok { - if strings.Contains(string(err.Stderr), "No such object") { - destroyVm() - } - } - } + detail = details[0] + return +} + +// start +// - if the container already exists, start it or unpause it. +func (c *container) start(ctx *Context) error { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + return uninitializedContainer(errors.New("start of uninitialized container")) } - // Update the VM from the container at regular intervals until the done - // channel is closed. - for { - select { - case <-ticker.C: - ctx.WithLock(vm, updateVmFromContainer) - case <-done: - return - } + start := "start" + _, detail, err := c.inspect() + if err != nil { + return err } -} -// stop the container (if any) for the given vm. -func (c *container) stop(ctx *Context, vm *VirtualMachine) { - if c.id == "" { - return + if detail.State.Paused { + start = "unpause" } - cmd := exec.Command("docker", "stop", c.id) - err := cmd.Run() + cmd := exec.Command("docker", start, c.id) + err = cmd.Run() if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsNotRunning) + log.Printf("%s %s: %s", c.name, cmd.Args, err) } + + return err } // pause the container (if any) for the given vm. -func (c *container) pause(ctx *Context, vm *VirtualMachine) { - if c.id == "" { - return +func (c *container) pause(ctx *Context) error { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + return uninitializedContainer(errors.New("pause of uninitialized container")) } cmd := exec.Command("docker", "pause", c.id) err := cmd.Run() if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsNotRunning) + log.Printf("%s %s: %s", c.name, cmd.Args, err) } + + return err } // restart the container (if any) for the given vm. -func (c *container) restart(ctx *Context, vm *VirtualMachine) { - if c.id == "" { - return +func (c *container) restart(ctx *Context) error { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + return uninitializedContainer(errors.New("restart of uninitialized container")) } cmd := exec.Command("docker", "restart", c.id) err := cmd.Run() if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } else { - ctx.Map.Update(vm, toolsRunning) + log.Printf("%s %s: %s", c.name, cmd.Args, err) } + + return err } -// remove the container (if any) for the given vm. -func (c *container) remove(vm *VirtualMachine) { - if c.id == "" { - return - } +// stop the container (if any) for the given vm. +func (c *container) stop(ctx *Context) error { + c.Lock() + id := c.id + c.Unlock() - args := [][]string{ - {"rm", "-v", "-f", c.id}, - {"volume", "rm", "-f", c.name}, + if id == "" { + return uninitializedContainer(errors.New("stop of uninitialized container")) } - for i := range args { - cmd := exec.Command("docker", args[i]...) 
- err := cmd.Run() - if err != nil { - log.Printf("%s %s: %s", vm.Name, cmd.Args, err) - } + cmd := exec.Command("docker", "stop", c.id) + err := cmd.Run() + if err != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, err) } - c.id = "" + return err } -func (c *container) exec(ctx *Context, vm *VirtualMachine, auth types.BaseGuestAuthentication, args []string) (string, types.BaseMethodFault) { - fault := vm.run.prepareGuestOperation(vm, auth) - if fault != nil { - return "", fault +// exec invokes the specified command, with executable being the first of the args, in the specified container +// returns +// +// string - combined stdout and stderr from command +// err +// * uninitializedContainer error - if c.id is empty +// * err from cmd execution +func (c *container) exec(ctx *Context, args []string) (string, error) { + c.Lock() + id := c.id + c.Unlock() + + if id == "" { + return "", uninitializedContainer(errors.New("exec into uninitialized container")) } - args = append([]string{"exec", vm.run.id}, args...) + args = append([]string{"exec", c.id}, args...) cmd := exec.Command("docker", args...) - res, err := cmd.CombinedOutput() if err != nil { - log.Printf("%s: %s (%s)", vm.Self, cmd.Args, string(res)) - return "", new(types.GuestOperationsFault) + log.Printf("%s: %s (%s)", c.name, cmd.Args, string(res)) + return "", err } return strings.TrimSpace(string(res)), nil } -// From https://docs.docker.com/engine/reference/commandline/cp/ : -// > It is not possible to copy certain system files such as resources under /proc, /sys, /dev, tmpfs, and mounts created by the user in the container. -// > However, you can still copy such files by manually running tar in docker exec. -func guestUpload(id string, file string, r *http.Request) error { - cmd := exec.Command("docker", "exec", "-i", id, "tar", "Cxf", path.Dir(file), "-") - cmd.Stderr = os.Stderr - stdin, err := cmd.StdinPipe() +// remove the container (if any) for the given vm. Considers removal of an uninitialized container success. +// Also removes volumes and networks that indicate they are lifecycle coupled with this container. +// returns: +// +// err - joined err from deletion of container and any volumes or networks that have coupled lifecycle +func (c *container) remove(ctx *Context) error { + c.Lock() + defer c.Unlock() + + if c.id == "" { + // consider absence success + return nil + } + + cmd := exec.Command("docker", "rm", "-v", "-f", c.id) + err := cmd.Run() if err != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, err) return err } - if err = cmd.Start(); err != nil { - return err + + cmd = exec.Command("docker", "volume", "ls", "-q", "--filter", "label=container="+c.name, "--filter", "label="+deleteWithContainer) + volumesToReap, lsverr := cmd.Output() + if lsverr != nil { + log.Printf("%s %s: %s", c.name, cmd.Args, lsverr) } + log.Printf("%s volumes: %s", c.name, volumesToReap) - tw := tar.NewWriter(stdin) - _ = tw.WriteHeader(&tar.Header{ - Name: path.Base(file), - Size: r.ContentLength, - Mode: 0444, - ModTime: time.Now(), - }) + var rmverr error + if len(volumesToReap) > 0 { + run := []string{"volume", "rm", "-f"} + run = append(run, strings.Split(string(volumesToReap), "\n")...) + cmd = exec.Command("docker", run...) 
+		var out []byte
+		out, rmverr = cmd.Output()
+		if rmverr != nil {
+			log.Printf("%s %s: %s, %s", c.name, cmd.Args, rmverr, out)
+		}
+	}
+
+	cmd = exec.Command("docker", "network", "ls", "-q", "--filter", "label=container="+c.name, "--filter", "label="+deleteWithContainer)
+	networksToReap, lsnerr := cmd.Output()
+	if lsnerr != nil {
+		log.Printf("%s %s: %s", c.name, cmd.Args, lsnerr)
+	}
+
+	var rmnerr error
+	if len(networksToReap) > 0 {
+		run := []string{"network", "rm", "-f"}
+		run = append(run, strings.Split(string(networksToReap), "\n")...)
+		cmd = exec.Command("docker", run...)
+		rmnerr = cmd.Run()
+		if rmnerr != nil {
+			log.Printf("%s %s: %s", c.name, cmd.Args, rmnerr)
+		}
+	}
+
+	if err != nil || lsverr != nil || rmverr != nil || lsnerr != nil || rmnerr != nil {
+		return fmt.Errorf("err: {%s}, lsverr: {%s}, rmverr: {%s}, lsnerr: {%s}, rmnerr: {%s}", err, lsverr, rmverr, lsnerr, rmnerr)
+	}
+
+	if c.cancelWatch != nil {
+		c.cancelWatch()
+		eventWatch.ignore(c)
+	}
+	c.id = ""
+	return nil
+}
+
+// updated is a simple trigger allowing a caller to indicate that something has likely changed about the container
+// and interested parties should re-inspect as needed.
+func (c *container) updated() {
+	consolidationWindow := 250 * time.Millisecond
+	if d, err := time.ParseDuration(os.Getenv("VCSIM_EVENT_CONSOLIDATION_WINDOW")); err == nil {
+		consolidationWindow = d
+	}
+
+	select {
+	case c.changes <- struct{}{}:
+		time.Sleep(consolidationWindow)
+		// as this is only a hint to avoid waiting for the full inspect interval, we don't care about accumulating
+		// multiple triggers. We do pause to allow large numbers of sequential updates to consolidate
+	default:
+	}
+}
+
+// watchContainer monitors the underlying container and updates
+// properties based on the container status. This occurs until either
+// the container or the VM is removed.
+// returns:
+//
+//	err - uninitializedContainer error - if c.id is empty
+func (c *container) watchContainer(ctx context.Context, updateFn func(*containerDetails, *container) error) error {
+	c.Lock()
+	defer c.Unlock()
+
+	if c.id == "" {
+		return uninitializedContainer(errors.New("attempt to watch uninitialized container"))
+	}
+
+	eventWatch.watch(c)
+
+	cancelCtx, cancelFunc := context.WithCancel(ctx)
+	c.cancelWatch = cancelFunc
+
+	// Update the VM from the container at regular intervals until the done
+	// channel is closed. 
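
+	// A change hint on c.changes, fed by the docker events watcher, also
+	// triggers an immediate refresh between ticks.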
+ go func() { + inspectInterval := 10 * time.Second + if d, err := time.ParseDuration(os.Getenv("VCSIM_INSPECT_INTERVAL")); err == nil { + inspectInterval = d + } + ticker := time.NewTicker(inspectInterval) + + update := func() { + _, details, err := c.inspect() + var rmErr error + var removing bool + if _, ok := err.(uninitializedContainer); ok { + removing = true + rmErr = c.remove(SpoofContext()) + } + + updateErr := updateFn(&details, c) + // if we don't succeed we want to re-try + if removing && rmErr == nil && updateErr == nil { + ticker.Stop() + return + } + if updateErr != nil { + log.Printf("vcsim container watch: %s %s", c.id, updateErr) + } + } + + for { + select { + case <-c.changes: + update() + case <-ticker.C: + update() + case <-cancelCtx.Done(): + return + } + } + }() + + return nil } -const guestPrefix = "/guestFile/" +func (w *eventWatcher) watch(c *container) { + w.Lock() + defer w.Unlock() -// ServeGuest handles container guest file upload/download -func ServeGuest(w http.ResponseWriter, r *http.Request) { - // Real vCenter form: /guestFile?id=139&token=... - // vcsim form: /guestFile/tmp/foo/bar?id=ebc8837b8cb6&token=... + if w.watches == nil { + w.watches = make(map[string]*container) + } - id := r.URL.Query().Get("id") - file := strings.TrimPrefix(r.URL.Path, guestPrefix[:len(guestPrefix)-1]) - var err error + w.watches[c.id] = c - switch r.Method { - case http.MethodPut: - err = guestUpload(id, file, r) - case http.MethodGet: - err = guestDownload(id, file, w) - default: - w.WriteHeader(http.StatusMethodNotAllowed) - return + if w.stdin == nil { + cmd := exec.Command("docker", "events", "--format", "'{{.ID}}'", "--filter", "Type=container") + w.stdout, _ = cmd.StdoutPipe() + w.stdin, _ = cmd.StdinPipe() + err := cmd.Start() + if err != nil { + log.Printf("docker event watcher: %s %s", cmd.Args, err) + w.stdin = nil + w.stdout = nil + w.process = nil + + return + } + + w.process = cmd.Process + + go w.monitor() } +} - if err != nil { - log.Printf("%s %s: %s", r.Method, r.URL, err) - w.WriteHeader(http.StatusInternalServerError) +func (w *eventWatcher) ignore(c *container) { + w.Lock() + + delete(w.watches, c.id) + + if len(w.watches) == 0 && w.stdin != nil { + w.stop() } + + w.Unlock() } -// productSerial returns the uuid in /sys/class/dmi/id/product_serial format -func productSerial(id uuid.UUID) string { - var dst [len(id)*2 + len(id) - 1]byte - - j := 0 - for i := 0; i < len(id); i++ { - hex.Encode(dst[j:j+2], id[i:i+1]) - j += 3 - if j < len(dst) { - s := j - 1 - if s == len(dst)/2 { - dst[s] = '-' - } else { - dst[s] = ' ' - } - } +func (w *eventWatcher) monitor() { + w.Lock() + watches := len(w.watches) + w.Unlock() + + if watches == 0 { + return } - return fmt.Sprintf("VMware-%s", string(dst[:])) + scanner := bufio.NewScanner(w.stdout) + for scanner.Scan() { + id := strings.TrimSpace(scanner.Text()) + + w.Lock() + container := w.watches[id] + w.Unlock() + + if container != nil { + // this is called in a routine to allow an event consolidation window + go container.updated() + } + } } -// productUUID returns the uuid in /sys/class/dmi/id/product_uuid format -func productUUID(id uuid.UUID) string { - var dst [36]byte - - hex.Encode(dst[0:2], id[3:4]) - hex.Encode(dst[2:4], id[2:3]) - hex.Encode(dst[4:6], id[1:2]) - hex.Encode(dst[6:8], id[0:1]) - dst[8] = '-' - hex.Encode(dst[9:11], id[5:6]) - hex.Encode(dst[11:13], id[4:5]) - dst[13] = '-' - hex.Encode(dst[14:16], id[7:8]) - hex.Encode(dst[16:18], id[6:7]) - dst[18] = '-' - hex.Encode(dst[19:23], id[8:10]) - 
dst[23] = '-'
-	hex.Encode(dst[24:], id[10:])
-
-	return strings.ToUpper(string(dst[:]))
+func (w *eventWatcher) stop() {
+	if w.stdin != nil {
+		w.stdin.Close()
+		w.stdin = nil
+	}
+	if w.stdout != nil {
+		w.stdout.Close()
+		w.stdout = nil
+	}
+	w.process.Kill()
 }
diff --git a/simulator/container_host_system.go b/simulator/container_host_system.go
new file mode 100644
index 000000000..c3d283abb
--- /dev/null
+++ b/simulator/container_host_system.go
@@ -0,0 +1,351 @@
+/*
+Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package simulator
+
+import (
+	"fmt"
+	"strings"
+
+	"github.com/vmware/govmomi/units"
+	"github.com/vmware/govmomi/vim25/methods"
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+const (
+	advOptPrefixPnicToUnderlayPrefix = "RUN.underlay."
+	advOptContainerBackingImage      = "RUN.container"
+	defaultUnderlayBridgeName        = "vcsim-underlay"
+)
+
+type simHost struct {
+	host *HostSystem
+	c    *container
+}
+
+// createSimHostMounts iterates over the provided filesystem mount info, creating docker volumes. It does _not_ delete volumes
+// already created if creation of one fails.
+// Returns:
+// volume mounts: mount options suitable to pass directly to docker
+// exec commands: a set of commands to run in the sim host after creation
+// error: if construction of the above outputs fails
+func createSimHostMounts(ctx *Context, containerName string, mounts []types.HostFileSystemMountInfo) ([]string, [][]string, error) {
+	var dockerVol []string
+	var symlinkCmds [][]string
+
+	for i := range mounts {
+		info := &mounts[i]
+		name := info.Volume.GetHostFileSystemVolume().Name
+
+		// NOTE: if we ever need persistence cross-invocation we can look at encoding the disk info as a label
+		labels := []string{"name=" + name, "container=" + containerName, deleteWithContainer}
+		dockerUuid, err := createVolume("", labels, nil)
+		if err != nil {
+			return nil, nil, err
+		}
+
+		uuid := volumeIDtoHostVolumeUUID(dockerUuid)
+		name = strings.Replace(name, uuidToken, uuid, -1)
+
+		switch vol := info.Volume.(type) {
+		case *types.HostVmfsVolume:
+			vol.BlockSizeMb = 1
+			vol.BlockSize = units.KB
+			vol.UnmapGranularity = units.KB
+			vol.UnmapPriority = "low"
+			vol.MajorVersion = 6
+			vol.Version = "6.82"
+			vol.Uuid = uuid
+			vol.HostFileSystemVolume.Name = name
+			for e := range vol.Extent {
+				vol.Extent[e].DiskName = "____simulated_volume_____"
+				if vol.Extent[e].Partition == 0 {
+					// HACK: this should be unique within the diskname, but for now this will suffice
+					// partitions start at 1
+					vol.Extent[e].Partition = int32(e + 1)
+				}
+			}
+			vol.Ssd = types.NewBool(true)
+			vol.Local = types.NewBool(true)
+		case *types.HostVfatVolume:
+			vol.HostFileSystemVolume.Name = name
+		}
+
+		info.VStorageSupport = "vStorageUnsupported"
+
+		info.MountInfo.Path = "/vmfs/volumes/" + uuid
+		info.MountInfo.Mounted = types.NewBool(true)
+		info.MountInfo.Accessible = types.NewBool(true)
+		if info.MountInfo.AccessMode == "" {
+			info.MountInfo.AccessMode = "readWrite"
+		}
+
+		opt := "rw"
+		if 
info.MountInfo.AccessMode == "readOnly" { + opt = "ro" + } + + dockerVol = append(dockerVol, fmt.Sprintf("%s:/vmfs/volumes/%s:%s", dockerUuid, uuid, opt)) + + // create symlinks from /vmfs/volumes/ for the Volume Name - the direct mount (path) is only the uuid + // ? can we do this via a script in the ESX image instead of via exec? + // ? are the volume names exposed in any manner inside the host? They must be because these mounts exist but where does that come from? Chicken and egg problem? ConfigStore? + symlinkCmds = append(symlinkCmds, []string{"ln", "-s", fmt.Sprintf("/vmfs/volumes/%s", uuid), fmt.Sprintf("/vmfs/volumes/%s", name)}) + if strings.HasPrefix(name, "OSDATA") { + symlinkCmds = append(symlinkCmds, []string{"mkdir", "-p", "/var/lib/vmware"}) + symlinkCmds = append(symlinkCmds, []string{"ln", "-s", fmt.Sprintf("/vmfs/volumes/%s", uuid), "/var/lib/vmware/osdata"}) + } + } + + return dockerVol, symlinkCmds, nil +} + +// createSimHostNetworks creates the networks for the host if not already created. Because we expect multiple hosts on the same network to act as a cluster +// it's likely that only the first host will create networks. +// This includes: +// * bridge network per-pNIC +// * bridge network per-DVS +// +// Returns: +// * array of networks to attach to +// * array of commands to run +// * error +// +// TODO: implement bridge network per DVS - not needed until container backed VMs are "created" on container backed "hosts" +func createSimHostNetworks(ctx *Context, containerName string, networkInfo *types.HostNetworkInfo, advOpts *OptionManager) ([]string, [][]string, error) { + var dockerNet []string + var cmds [][]string + + existingNets := make(map[string]string) + + // a pnic does not have an IP so this is purely a connectivity statement, not a network identity, however this is not how docker works + // so we're going to end up with a veth (our pnic) that does have an IP assigned. That IP will end up being used in a NetConfig structure associated + // with the pNIC. See HostSystem.getNetConfigInterface. + for i := range networkInfo.Pnic { + pnicName := networkInfo.Pnic[i].Device + + bridge := getPnicUnderlay(advOpts, pnicName) + + if pnic, attached := existingNets[bridge]; attached { + return nil, nil, fmt.Errorf("cannot attach multiple pNICs to the same underlay: %s and %s both attempting to connect to %s for %s", pnic, pnicName, bridge, containerName) + } + + _, err := createBridge(bridge) + if err != nil { + return nil, nil, err + } + + dockerNet = append(dockerNet, bridge) + existingNets[bridge] = pnicName + } + + return dockerNet, cmds, nil +} + +func getPnicUnderlay(advOpts *OptionManager, pnicName string) string { + queryRes := advOpts.QueryOptions(&types.QueryOptions{Name: advOptPrefixPnicToUnderlayPrefix + pnicName}).(*methods.QueryOptionsBody).Res + return queryRes.Returnval[0].GetOptionValue().Value.(string) +} + +// createSimulationHostcreates a simHost binding if the host.ConfigManager.AdvancedOption set contains a key "RUN.container". +// If the set does not contain that key, this returns nil. +// Methods on the simHost type are written to check for nil object so the return from this call can be blindly +// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. +// +// The created simhost is based off of the details of the supplied host system. +// VMFS locations are created based on FileSystemMountInfo +// Bridge networks are created to simulate underlay networks - one per pNIC. 
You cannot connect two pNICs to the same underlay.
+//
+// On Network connectivity - initially this is using docker network constructs. This means we cannot easily use nested "ip netns" so we cannot
+// have a perfect representation of the ESX structure: pnic(veth)->vswitch(bridge)->{vmk,vnic}(veth)
+// Instead we have the following:
+// * bridge network per underlay - everything connects directly to the underlay
+// * VMs/CRXs connect to the underlay dictated by the Uplink pNIC attached to their vSwitch
+// * hostd vmknic gets the "host" container IP - we don't currently support multiple vmknics with different IPs
+// * no support for mocking VLANs
+func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) {
+	sh := &simHost{
+		host: host,
+	}
+
+	advOpts := ctx.Map.Get(host.ConfigManager.AdvancedOption.Reference()).(*OptionManager)
+	fault := advOpts.QueryOptions(&types.QueryOptions{Name: "RUN.container"}).(*methods.QueryOptionsBody).Fault()
+	if fault != nil {
+		if _, ok := fault.VimFault().(*types.InvalidName); ok {
+			return nil, nil
+		}
+		return nil, fmt.Errorf("error retrieving container backing from host config manager: %+v", fault.VimFault())
+	}
+
+	// assemble env
+	var dockerEnv []string
+
+	var execCmds [][]string
+
+	var err error
+
+	hName := host.Summary.Config.Name
+	hUuid := host.Summary.Hardware.Uuid
+	containerName := constructContainerName(hName, hUuid)
+
+	// create volumes and mounts
+	dockerVol, volCmds, err := createSimHostMounts(ctx, containerName, host.Config.FileSystemVolume.MountInfo)
+	if err != nil {
+		return nil, err
+	}
+	execCmds = append(execCmds, volCmds...)
+
+	// create networks
+	dockerNet, netCmds, err := createSimHostNetworks(ctx, containerName, host.Config.Network, advOpts)
+	if err != nil {
+		return nil, err
+	}
+	execCmds = append(execCmds, netCmds...)
+
+	// create the container
+	sh.c, err = create(ctx, hName, hUuid, dockerNet, dockerVol, nil, dockerEnv, "alpine", []string{"sleep", "infinity"})
+	if err != nil {
+		return nil, err
+	}
+
+	// start the container
+	err = sh.c.start(ctx)
+	if err != nil {
+		return nil, err
+	}
+
+	// run post-creation steps
+	for _, cmd := range execCmds {
+		_, err := sh.c.exec(ctx, cmd)
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	_, detail, err := sh.c.inspect()
+	if err != nil {
+		return nil, err
+	}
+
+	for i := range host.Config.Network.Pnic {
+		pnic := &host.Config.Network.Pnic[i]
+		bridge := getPnicUnderlay(advOpts, pnic.Device)
+		settings := detail.NetworkSettings.Networks[bridge]
+
+		// it doesn't really make sense at an ESX level to set this information as IP bindings are associated with
+		// vnics (VMs) or vmknics (daemons such as hostd).
+		// However it's a useful location to stash this info in a manner that can be retrieved at a later date. 
+ pnic.Spec.Ip.IpAddress = settings.IPAddress + pnic.Spec.Ip.SubnetMask = prefixToMask(settings.IPPrefixLen) + + pnic.Mac = settings.MacAddress + } + + // update the active "management" nicType with the container IP for vmnic0 + netconfig, err := host.getNetConfigInterface(ctx, "management") + if err != nil { + return nil, err + } + netconfig.vmk.Spec.Ip.IpAddress = netconfig.uplink.Spec.Ip.IpAddress + netconfig.vmk.Spec.Ip.SubnetMask = netconfig.uplink.Spec.Ip.SubnetMask + netconfig.vmk.Spec.Mac = netconfig.uplink.Mac + + return sh, nil +} + +// remove destroys the container associated with the host and any volumes with labels specifying their lifecycle +// is coupled with the container +func (sh *simHost) remove(ctx *Context) error { + if sh == nil { + return nil + } + + return sh.c.remove(ctx) +} + +// volumeIDtoHostVolumeUUID takes the 64 char docker uuid and converts it into a 32char ESX form of 8-8-4-12 +// Perhaps we should do this using an md5 rehash, but instead we just take the first 32char for ease of cross-reference. +func volumeIDtoHostVolumeUUID(id string) string { + return fmt.Sprintf("%s-%s-%s-%s", id[0:8], id[8:16], id[16:20], id[20:32]) +} + +// By reference to physical system, partition numbering tends to work out like this: +// 1. EFI System (100 MB) +// Free space (1.97 MB) +// 5. Basic Data (4 GB) (bootbank1) +// 6. Basic Data (4 GB) (bootbank2) +// 7. VMFSL (119.9 GB) (os-data) +// 8. VMFS (1 TB) (datastore1) +// I assume the jump from 1 -> 5 harks back to the primary/logical partitions from MBT days +const uuidToken = "%__UUID__%" + +var defaultSimVolumes = []types.HostFileSystemMountInfo{ + { + MountInfo: types.HostMountInfo{ + AccessMode: "readWrite", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "VMFS", + Name: "datastore1", + Capacity: 1 * units.TB, + }, + Extent: []types.HostScsiDiskPartition{ + { + Partition: 8, + }, + }, + }, + }, + { + MountInfo: types.HostMountInfo{ + AccessMode: "readWrite", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "OSDATA-%__UUID__%", + Capacity: 128 * units.GB, + }, + Extent: []types.HostScsiDiskPartition{ + { + Partition: 7, + }, + }, + }, + }, + { + MountInfo: types.HostMountInfo{ + AccessMode: "readOnly", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK1", + Capacity: 4 * units.GB, + }, + }, + }, + { + MountInfo: types.HostMountInfo{ + AccessMode: "readOnly", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK2", + Capacity: 4 * units.GB, + }, + }, + }, +} diff --git a/simulator/container_host_system_test.go b/simulator/container_host_system_test.go new file mode 100644 index 000000000..1233bc97b --- /dev/null +++ b/simulator/container_host_system_test.go @@ -0,0 +1,226 @@ +/* +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package simulator + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + + "github.com/vmware/govmomi/simulator/esx" + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +func TestHostOptionManager(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + if err != nil { + t.Fatal(err) + } + + hs := NewHostSystem(esx.HostSystem) + + advOpts, ok := Map.Get(hs.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + require.True(t, ok, "Expected to inflate OptionManager from reference") + + option := &types.OptionValue{ + Key: "TEST.hello", + Value: "world", + } + + fault := advOpts.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Fault() + require.IsType(t, &types.InvalidName{}, fault.VimFault(), "Expected new host from template not to have test option set") + + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + require.Nil(t, fault, "Expected setting test option to succeed") + + queryRes := advOpts.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Res + require.Equal(t, 1, len(queryRes.Returnval), "Expected query of set option to succeed") + require.Equal(t, option.Value, queryRes.Returnval[0].GetOptionValue().Value, "Expected set value") + + option2 := &types.OptionValue{ + Key: "TEST.hello", + Value: "goodbye", + } + + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option2}}).Fault() + require.Nil(t, fault, "Expected update of test option to succeed") + + queryRes = advOpts.QueryOptions(&types.QueryOptions{Name: option2.Key}).(*methods.QueryOptionsBody).Res + require.Equal(t, 1, len(queryRes.Returnval), "Expected query of updated option to succeed") + require.Equal(t, option2.Value, queryRes.Returnval[0].GetOptionValue().Value, "Expected updated value") + + hs.configure(SpoofContext(), types.HostConnectSpec{}, true) + assert.Nil(t, hs.sh, "Expected not to have container backing if not requested") +} + +func TestSyncWithOptionsStruct(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + if err != nil { + t.Fatal(err) + } + + hs := NewHostSystem(esx.HostSystem) + + advOpts, ok := Map.Get(hs.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + require.True(t, ok, "Expected to inflate OptionManager from reference") + + option := &types.OptionValue{ + Key: "TEST.hello", + Value: "world", + } + + fault := advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + require.Nil(t, fault, "Expected setting test option to succeed") + + assert.Equal(t, option, hs.Config.Option[1], "Expected mirror to reflect changes") +} + +func TestPerHostOptionManager(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + if err != nil { + t.Fatal(err) + } + + hs := NewHostSystem(esx.HostSystem) + hs2 := NewHostSystem(esx.HostSystem) + + advOpts, ok := Map.Get(hs.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + require.True(t, ok, "Expected to inflate OptionManager from reference") + + advOpts2 := Map.Get(hs2.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + + option := &types.OptionValue{ + Key: "TEST.hello", + Value: "world", + } + + fault := advOpts.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Fault() + require.IsType(t, &types.InvalidName{}, fault.VimFault(), "Expected host from template not 
to have test option set") + + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault() + require.Nil(t, fault, "Expected setting test option to succeed") + + queryRes := advOpts.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Res + require.Equal(t, 1, len(queryRes.Returnval), "Expected query of set option to succeed") + require.Equal(t, option.Value, queryRes.Returnval[0].GetOptionValue().Value, "Expected set value") + + fault = advOpts2.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Fault() + require.IsType(t, &types.InvalidName{}, fault.VimFault(), "Expected second host to be unchanged") + + option2 := &types.OptionValue{ + Key: "TEST.hello", + Value: "goodbye", + } + + fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option2}}).Fault() + require.Nil(t, fault, "Expected update of test option to succeed") + + queryRes = advOpts.QueryOptions(&types.QueryOptions{Name: option2.Key}).(*methods.QueryOptionsBody).Res + require.Equal(t, 1, len(queryRes.Returnval), "Expected query of updated option to succeed") + require.Equal(t, option2.Value, queryRes.Returnval[0].GetOptionValue().Value, "Expected updated value") + + assert.Equal(t, option2, hs.Config.Option[1], "Expected mirror to reflect changes") + + hs.configure(SpoofContext(), types.HostConnectSpec{}, true) + assert.Nil(t, hs.sh, "Expected not to have container backing if not requested") + + hs3 := NewHostSystem(esx.HostSystem) + + advOpts3 := Map.Get(hs3.ConfigManager.AdvancedOption.Reference()).(*OptionManager) + fault = advOpts3.QueryOptions(&types.QueryOptions{Name: option.Key}).(*methods.QueryOptionsBody).Fault() + require.IsType(t, &types.InvalidName{}, fault.VimFault(), "Expected host created after update not to inherit change") + +} + +func TestHostContainerBacking(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + if err != nil { + t.Fatal(err) + } + + ctx := SpoofContext() + + hs := NewHostSystem(esx.HostSystem) + hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes, "vcsim-mgmt-underlay") + + details, err := hs.getNetConfigInterface(ctx, "management") + assert.NoError(t, err, "Expected no error from management netconfig check") + assert.Equal(t, "0.0.0.0", details.vmk.Spec.Ip.IpAddress, "Expected IP to be empty prior to container creation") + + hs.configure(ctx, types.HostConnectSpec{}, true) + + assert.NoError(t, err, "Expected no error from management netconfig check") + assert.NotEqual(t, "0.0.0.0", details.vmk.Spec.Ip.IpAddress, "Expected management IP to set after container creation") + + hs.sh.remove(ctx) +} + +func TestMultipleSimHost(t *testing.T) { + m := ESX() + + defer m.Remove() + + err := m.Create() + require.Nil(t, err, "expected successful creation of model") + + ctx := SpoofContext() + + hs := NewHostSystem(esx.HostSystem) + hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes) + + hs2 := NewHostSystem(esx.HostSystem) + hs2.configureContainerBacking(ctx, "alpine", defaultSimVolumes) + + details, err := hs.getNetConfigInterface(ctx, "management") + assert.NoError(t, err, "Expected no error from management netconfig check") + assert.Equal(t, "0.0.0.0", details.vmk.Spec.Ip.IpAddress, "Expected IP to be empty prior to container creation") + + hs.configure(ctx, types.HostConnectSpec{}, true) + + details2, err := hs2.getNetConfigInterface(ctx, "management") + assert.NoError(t, err, "Expected no error from management netconfig 
check") + assert.Equal(t, "0.0.0.0", details2.vmk.Spec.Ip.IpAddress, "Expected IP to be empty prior to container creation") + + hs2.configure(ctx, types.HostConnectSpec{}, true) + + assert.NotEqual(t, details.vmk.Spec.Ip.IpAddress, details2.vmk.Spec.Ip.IpAddress, "Expected hosts to get different IPs") + + hs.sh.remove(ctx) + + // TODO: assert one container plus volumes left - need to wait for + // https://github.com/containers/podman/issues/19219 to be fixed for podman to work - otherwise all volumes get removed + // with the first host removed + hs2.sh.remove(ctx) +} diff --git a/simulator/container_virtual_machine.go b/simulator/container_virtual_machine.go new file mode 100644 index 000000000..a2b91fd86 --- /dev/null +++ b/simulator/container_virtual_machine.go @@ -0,0 +1,511 @@ +/* +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package simulator + +import ( + "archive/tar" + "context" + "encoding/hex" + "encoding/json" + "errors" + "fmt" + "io" + "log" + "net/http" + "strconv" + "strings" + + "github.com/google/uuid" + + "github.com/vmware/govmomi/vim25/methods" + "github.com/vmware/govmomi/vim25/types" +) + +const ContainerBackingOptionKey = "RUN.container" + +var ( + toolsRunning = []types.PropertyChange{ + {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsOk}, + {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsRunning)}, + } + + toolsNotRunning = []types.PropertyChange{ + {Name: "guest.toolsStatus", Val: types.VirtualMachineToolsStatusToolsNotRunning}, + {Name: "guest.toolsRunningStatus", Val: string(types.VirtualMachineToolsRunningStatusGuestToolsNotRunning)}, + } +) + +type simVM struct { + vm *VirtualMachine + c *container +} + +// createSimulationVM inspects the provided VirtualMachine and creates a simVM binding for it if +// the vm.Config.ExtraConfig set contains a key "RUN.container". +// If the ExtraConfig set does not contain that key, this returns nil. +// Methods on the simVM type are written to check for nil object so the return from this call can be blindly +// assigned and invoked without the caller caring about whether a binding for a backing container was warranted. +func createSimulationVM(vm *VirtualMachine) *simVM { + svm := &simVM{ + vm: vm, + } + + for _, opt := range vm.Config.ExtraConfig { + val := opt.GetOptionValue() + if val.Key == ContainerBackingOptionKey { + return svm + } + } + + return nil +} + +// applies container network settings to vm.Guest properties. +func (svm *simVM) syncNetworkConfigToVMGuestProperties() error { + if svm == nil { + return nil + } + + out, detail, err := svm.c.inspect() + if err != nil { + return err + } + + svm.vm.Config.Annotation = "inspect" + svm.vm.logPrintf("%s: %s", svm.vm.Config.Annotation, string(out)) + + netS := detail.NetworkSettings.networkSettings + + // ? 
Why is this valid - we're taking the first entry while iterating over a map
+	for _, n := range detail.NetworkSettings.Networks {
+		netS = n
+		break
+	}
+
+	if detail.State.Paused {
+		svm.vm.Runtime.PowerState = types.VirtualMachinePowerStateSuspended
+	} else if detail.State.Running {
+		svm.vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOn
+	} else {
+		svm.vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff
+	}
+
+	svm.vm.Guest.IpAddress = netS.IPAddress
+	svm.vm.Summary.Guest.IpAddress = netS.IPAddress
+
+	if len(svm.vm.Guest.Net) != 0 {
+		net := &svm.vm.Guest.Net[0]
+		net.IpAddress = []string{netS.IPAddress}
+		net.MacAddress = netS.MacAddress
+		net.IpConfig = &types.NetIpConfigInfo{
+			IpAddress: []types.NetIpConfigInfoIpAddress{{
+				IpAddress:    netS.IPAddress,
+				PrefixLength: int32(netS.IPPrefixLen),
+				State:        string(types.NetIpConfigInfoIpAddressStatusPreferred),
+			}},
+		}
+	}
+
+	for _, d := range svm.vm.Config.Hardware.Device {
+		if eth, ok := d.(types.BaseVirtualEthernetCard); ok {
+			eth.GetVirtualEthernetCard().MacAddress = netS.MacAddress
+			break
+		}
+	}
+
+	return nil
+}
+
+func (svm *simVM) prepareGuestOperation(auth types.BaseGuestAuthentication) types.BaseMethodFault {
+	// a nil svm means the VM has no container backing, so guest operations are unavailable
+	// (the nil check must short-circuit here - svm.vm is dereferenced below)
+	if svm == nil || svm.c == nil || svm.c.id == "" {
+		return new(types.GuestOperationsUnavailable)
+	}
+
+	if svm.vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn {
+		return &types.InvalidPowerState{
+			RequestedState: types.VirtualMachinePowerStatePoweredOn,
+			ExistingState:  svm.vm.Runtime.PowerState,
+		}
+	}
+
+	switch creds := auth.(type) {
+	case *types.NamePasswordAuthentication:
+		if creds.Username == "" || creds.Password == "" {
+			return new(types.InvalidGuestLogin)
+		}
+	default:
+		return new(types.InvalidGuestLogin)
+	}
+
+	return nil
+}
+
+// populateDMI writes BIOS UUID DMI files to a container volume
+func (svm *simVM) populateDMI() error {
+	if svm.c == nil {
+		return nil
+	}
+
+	files := []tarEntry{
+		{
+			&tar.Header{
+				Name: "product_uuid",
+				Mode: 0444,
+			},
+			[]byte(productUUID(svm.vm.uid)),
+		},
+		{
+			&tar.Header{
+				Name: "product_serial",
+				Mode: 0444,
+			},
+			[]byte(productSerial(svm.vm.uid)),
+		},
+	}
+
+	_, err := svm.c.createVolume("dmi", []string{deleteWithContainer}, files)
+	return err
+}
+
+// start runs the container specified by the "RUN.container" ExtraConfig property,
+// lazily creating the container backing if it does not yet exist.
+func (svm *simVM) start(ctx *Context) error {
+	if svm == nil {
+		return nil
+	}
+
+	if svm.c != nil && svm.c.id != "" {
+		err := svm.c.start(ctx)
+		if err != nil {
+			log.Printf("%s %s: %s", svm.vm.Name, "start", err)
+		} else {
+			ctx.Map.Update(svm.vm, toolsRunning)
+		}
+
+		return err
+	}
+
+	var args []string
+	var env []string
+	var ports []string
+	mountDMI := true
+
+	for _, opt := range svm.vm.Config.ExtraConfig {
+		val := opt.GetOptionValue()
+		if val.Key == ContainerBackingOptionKey {
+			run := val.Value.(string)
+			err := json.Unmarshal([]byte(run), &args)
+			if err != nil {
+				args = []string{run}
+			}
+
+			continue
+		}
+
+		if val.Key == "RUN.mountdmi" {
+			var mount bool
+			err := json.Unmarshal([]byte(val.Value.(string)), &mount)
+			if err == nil {
+				mountDMI = mount
+			}
+
+			continue
+		}
+
+		if strings.HasPrefix(val.Key, "RUN.port.") {
+			// ? would this not make more sense as a set of tuples in the value?
+			// or inlined into the RUN.container freeform string as is the case with the nginx volume in the examples?
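+			// e.g. {Key: "RUN.port.80", Value: "8888"} takes the container port from the final
+			// key segment and yields the docker-style binding "8888:80" (hostPort:containerPort)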
+ sKey := strings.Split(val.Key, ".") + containerPort := sKey[len(sKey)-1] + ports = append(ports, fmt.Sprintf("%s:%s", val.Value.(string), containerPort)) + + continue + } + + if strings.HasPrefix(val.Key, "RUN.env.") { + sKey := strings.Split(val.Key, ".") + envKey := sKey[len(sKey)-1] + env = append(env, fmt.Sprintf("%s=%s", envKey, val.Value.(string))) + } + + if strings.HasPrefix(val.Key, "guestinfo.") { + key := strings.Replace(strings.ToUpper(val.Key), ".", "_", -1) + env = append(env, fmt.Sprintf("VMX_%s=%s", key, val.Value.(string))) + + continue + } + } + + if len(args) == 0 { + // not an error - it's simply a simVM that shouldn't be backed by a container + return nil + } + + if len(env) != 0 { + // Configure env as the data access method for cloud-init-vmware-guestinfo + env = append(env, "VMX_GUESTINFO=true") + } + + volumes := []string{} + if mountDMI { + volumes = append(volumes, constructVolumeName(svm.vm.Name, svm.vm.uid.String(), "dmi")+":/sys/class/dmi/id") + } + + var err error + svm.c, err = create(ctx, svm.vm.Name, svm.vm.uid.String(), nil, volumes, ports, env, args[0], args[1:]) + if err != nil { + return err + } + + if mountDMI { + // not combined with the test assembling volumes because we want to have the container name first. + // cannot add a label to a volume after creation, so if we want to associate with the container ID the + // container must come first + err = svm.populateDMI() + if err != nil { + return err + } + } + + err = svm.c.start(ctx) + if err != nil { + log.Printf("%s %s: %s %s", svm.vm.Name, "start", args, err) + return err + } + + ctx.Map.Update(svm.vm, toolsRunning) + + svm.vm.logPrintf("%s: %s", args, svm.c.id) + + if err = svm.syncNetworkConfigToVMGuestProperties(); err != nil { + log.Printf("%s inspect %s: %s", svm.vm.Name, svm.c.id, err) + } + + callback := func(details *containerDetails, c *container) error { + spoofctx := SpoofContext() + + if c.id == "" && svm.vm != nil { + // If the container cannot be found then destroy this VM unless the VM is no longer configured for container backing (svm.vm == nil) + taskRef := svm.vm.DestroyTask(spoofctx, &types.Destroy_Task{This: svm.vm.Self}).(*methods.Destroy_TaskBody).Res.Returnval + task, ok := spoofctx.Map.Get(taskRef).(*Task) + if !ok { + panic(fmt.Sprintf("couldn't retrieve task for moref %+q while deleting VM %s", taskRef, svm.vm.Name)) + } + + // Wait for the task to complete and see if there is an error. + task.Wait() + if task.Info.Error != nil { + msg := fmt.Sprintf("failed to destroy vm: err=%v", *task.Info.Error) + svm.vm.logPrintf(msg) + + return errors.New(msg) + } + } + + return svm.syncNetworkConfigToVMGuestProperties() + } + + // Start watching the container resource. + err = svm.c.watchContainer(context.Background(), callback) + if _, ok := err.(uninitializedContainer); ok { + // the container has been deleted before we could watch, despite successful launch so clean up. + callback(nil, svm.c) + + // successful launch so nil the error + return nil + } + + return err +} + +// stop the container (if any) for the given vm. +func (svm *simVM) stop(ctx *Context) error { + if svm == nil || svm.c == nil { + return nil + } + + err := svm.c.stop(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "stop", err) + + return err + } + + ctx.Map.Update(svm.vm, toolsNotRunning) + + return nil +} + +// pause the container (if any) for the given vm. 
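+// Invoked from the suspend path of powerVMTask; a paused container is in turn reported
+// back as VirtualMachinePowerStateSuspended by syncNetworkConfigToVMGuestProperties.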
+func (svm *simVM) pause(ctx *Context) error { + if svm == nil || svm.c == nil { + return nil + } + + err := svm.c.pause(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "pause", err) + + return err + } + + ctx.Map.Update(svm.vm, toolsNotRunning) + + return nil +} + +// restart the container (if any) for the given vm. +func (svm *simVM) restart(ctx *Context) error { + if svm == nil || svm.c == nil { + return nil + } + + err := svm.c.restart(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "restart", err) + + return err + } + + ctx.Map.Update(svm.vm, toolsRunning) + + return nil +} + +// remove the container (if any) for the given vm. +func (svm *simVM) remove(ctx *Context) error { + if svm == nil || svm.c == nil { + return nil + } + + err := svm.c.remove(ctx) + if err != nil { + log.Printf("%s %s: %s", svm.vm.Name, "remove", err) + + return err + } + + return nil +} + +func (svm *simVM) exec(ctx *Context, auth types.BaseGuestAuthentication, args []string) (string, types.BaseMethodFault) { + if svm == nil || svm.c == nil { + return "", nil + } + + fault := svm.prepareGuestOperation(auth) + if fault != nil { + return "", fault + } + + out, err := svm.c.exec(ctx, args) + if err != nil { + log.Printf("%s: %s (%s)", svm.vm.Name, args, string(out)) + return "", new(types.GuestOperationsFault) + } + + return strings.TrimSpace(string(out)), nil +} + +func guestUpload(id string, file string, r *http.Request) error { + // TODO: decide behaviour for no container + err := copyToGuest(id, file, r.ContentLength, r.Body) + _ = r.Body.Close() + return err +} + +func guestDownload(id string, file string, w http.ResponseWriter) error { + // TODO: decide behaviour for no container + sink := func(len int64, r io.Reader) error { + w.Header().Set("Content-Length", strconv.FormatInt(len, 10)) + _, err := io.Copy(w, r) + return err + } + + err := copyFromGuest(id, file, sink) + return err +} + +const guestPrefix = "/guestFile/" + +// ServeGuest handles container guest file upload/download +func ServeGuest(w http.ResponseWriter, r *http.Request) { + // Real vCenter form: /guestFile?id=139&token=... + // vcsim form: /guestFile/tmp/foo/bar?id=ebc8837b8cb6&token=... 
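+	// e.g. an upload in the vcsim form above could look like (illustrative values):
+	//   curl -T local.txt 'http://<vcsim>/guestFile/tmp/foo/bar?id=ebc8837b8cb6&token=<session-key>'
+	// PUT uploads into the container, GET downloads from it.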
+ + id := r.URL.Query().Get("id") + file := strings.TrimPrefix(r.URL.Path, guestPrefix[:len(guestPrefix)-1]) + var err error + + switch r.Method { + case http.MethodPut: + err = guestUpload(id, file, r) + case http.MethodGet: + err = guestDownload(id, file, w) + default: + w.WriteHeader(http.StatusMethodNotAllowed) + return + } + + if err != nil { + log.Printf("%s %s: %s", r.Method, r.URL, err) + w.WriteHeader(http.StatusInternalServerError) + } +} + +// productSerial returns the uuid in /sys/class/dmi/id/product_serial format +func productSerial(id uuid.UUID) string { + var dst [len(id)*2 + len(id) - 1]byte + + j := 0 + for i := 0; i < len(id); i++ { + hex.Encode(dst[j:j+2], id[i:i+1]) + j += 3 + if j < len(dst) { + s := j - 1 + if s == len(dst)/2 { + dst[s] = '-' + } else { + dst[s] = ' ' + } + } + } + + return fmt.Sprintf("VMware-%s", string(dst[:])) +} + +// productUUID returns the uuid in /sys/class/dmi/id/product_uuid format +func productUUID(id uuid.UUID) string { + var dst [36]byte + + hex.Encode(dst[0:2], id[3:4]) + hex.Encode(dst[2:4], id[2:3]) + hex.Encode(dst[4:6], id[1:2]) + hex.Encode(dst[6:8], id[0:1]) + dst[8] = '-' + hex.Encode(dst[9:11], id[5:6]) + hex.Encode(dst[11:13], id[4:5]) + dst[13] = '-' + hex.Encode(dst[14:16], id[7:8]) + hex.Encode(dst[16:18], id[6:7]) + dst[18] = '-' + hex.Encode(dst[19:23], id[8:10]) + dst[23] = '-' + hex.Encode(dst[24:], id[10:]) + + return strings.ToUpper(string(dst[:])) +} diff --git a/simulator/container_virtual_machine_test.go b/simulator/container_virtual_machine_test.go new file mode 100644 index 000000000..de32bd375 --- /dev/null +++ b/simulator/container_virtual_machine_test.go @@ -0,0 +1,259 @@ +/* +Copyright (c) 2023-2023 VMware, Inc. All Rights Reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/
+
+package simulator
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"log"
+	"os"
+	"os/exec"
+	"path/filepath"
+	"strings"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+
+	"github.com/vmware/govmomi/find"
+	"github.com/vmware/govmomi/object"
+	"github.com/vmware/govmomi/vim25"
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+// constructNginxBacking takes the content to serve from the container and returns ExtraConfig options
+// for constructing a container-backed VM
+// content - the contents of index.html
+// port - the host port to forward to container port 80
+func constructNginxBacking(t *testing.T, content string, port int) []types.BaseOptionValue {
+	dir := t.TempDir()
+	// experience shows that a parent directory created as part of the TempDir call may not have
+	// o+rx, preventing use within a container that doesn't have the same uid
+	for dirpart := dir; dirpart != "/"; dirpart = filepath.Dir(dirpart) {
+		os.Chmod(dirpart, 0755)
+		stat, err := os.Stat(dirpart)
+		require.Nil(t, err, "must be able to check file and directory permissions")
+		require.NotZero(t, stat.Mode()&0005, "does not have o+rx permissions", dirpart)
+	}
+
+	fpath := filepath.Join(dir, "index.html")
+	err := os.WriteFile(fpath, []byte(content), 0644)
+	require.Nil(t, err, "Expected to cleanly write content to file: %s", err)
+
+	// just in case umask gets in the way
+	err = os.Chmod(fpath, 0644)
+	require.Nil(t, err, "Expected to cleanly set file permissions on content: %s", err)
+
+	args := fmt.Sprintf("-v '%s:/usr/share/nginx/html:ro' nginx", dir)
+
+	return []types.BaseOptionValue{
+		&types.OptionValue{Key: ContainerBackingOptionKey, Value: args},        // run nginx
+		&types.OptionValue{Key: "RUN.port.80", Value: fmt.Sprintf("%d", port)}, // test port remap
+	}
+}
+
+// validateNginxContainer checks that the VM is serving the expected content on the expected ports.
+// pairs with constructNginxBacking
+func validateNginxContainer(t *testing.T, vm *object.VirtualMachine, expected string, port int) error {
+	ip, _ := vm.WaitForIP(context.Background(), true) // Returns the docker container's IP
+
+	// fetch the expected content via nginx, going direct to the container
+	cmd := exec.Command("docker", "run", "--rm", "curlimages/curl", "curl", "-f", fmt.Sprintf("http://%s:80", ip))
+	var buf bytes.Buffer
+	cmd.Stdout = &buf
+	err := cmd.Run()
+	res := buf.String()
+
+	if err != nil || strings.TrimSpace(res) != expected {
+		// we use Fail not Fatal because we want to clean up
+		t.Fail()
+		t.Log(err, buf.String())
+		fmt.Printf("%d diff", buf.Len()-len(expected))
+	}
+
+	// fetch the expected content via nginx, going via the port remap on the host
+	cmd = exec.Command("curl", "-f", fmt.Sprintf("http://localhost:%d", port))
+	buf.Reset()
+	cmd.Stdout = &buf
+	err = cmd.Run()
+	res = buf.String()
+	if err != nil || strings.TrimSpace(res) != expected {
+		t.Fail()
+		t.Log(err, buf.String())
+		fmt.Printf("%d diff", buf.Len()-len(expected))
+	}
+
+	return nil
+}
+
+// 1. Construct ExtraConfig args for container backing
+// 2. Create VM using that ExtraConfig
+// 3. 
Confirm docker container present that matches expectations +func TestCreateVMWithContainerBacking(t *testing.T) { + Test(func(ctx context.Context, c *vim25.Client) { + if _, err := exec.LookPath("docker"); err != nil { + fmt.Println("0 diff") + t.Skip("docker client binary not on PATH") + return + } + + finder := find.NewFinder(c) + pool, _ := finder.ResourcePool(ctx, "DC0_H0/Resources") + dc, err := finder.Datacenter(ctx, "DC0") + if err != nil { + log.Fatal(err) + } + + content := "foo" + port := 8888 + + spec := types.VirtualMachineConfigSpec{ + Name: "nginx-container-backed-from-creation", + Files: &types.VirtualMachineFileInfo{ + VmPathName: "[LocalDS_0] nginx", + }, + ExtraConfig: constructNginxBacking(t, content, port), + } + + f, _ := dc.Folders(ctx) + // Create a new VM + task, err := f.VmFolder.CreateVM(ctx, spec, pool, nil) + if err != nil { + log.Fatal(err) + } + + info, err := task.WaitForResult(ctx, nil) + if err != nil { + log.Fatal(err) + } + + vm := object.NewVirtualMachine(c, info.Result.(types.ManagedObjectReference)) + // PowerOn VM starts the nginx container + task, _ = vm.PowerOn(ctx) + err = task.Wait(ctx) + if err != nil { + log.Fatal(err) + } + + err = validateNginxContainer(t, vm, content, port) + if err != nil { + log.Fatal(err) + } + + spec2 := types.VirtualMachineConfigSpec{ + ExtraConfig: []types.BaseOptionValue{ + &types.OptionValue{Key: ContainerBackingOptionKey, Value: ""}, + }, + } + + task, err = vm.Reconfigure(ctx, spec2) + if err != nil { + log.Fatal(err) + } + + info, err = task.WaitForResult(ctx, nil) + if err != nil { + log.Fatal(info, err) + } + + // PowerOff stops the container + task, _ = vm.PowerOff(ctx) + _ = task.Wait(ctx) + // Destroy deletes the container + task, _ = vm.Destroy(ctx) + _ = task.Wait(ctx) + }) + // Output: 0 diff +} + +// 1. Create VM without ExtraConfig args for container backing +// 2. Construct ExtraConfig args for container backing +// 3. Update VM with ExtraConfig +// 4. 
Confirm docker container present that matches expectations
+func TestUpdateVMAddContainerBacking(t *testing.T) {
+	Test(func(ctx context.Context, c *vim25.Client) {
+		if _, err := exec.LookPath("docker"); err != nil {
+			fmt.Println("0 diff")
+			t.Skip("docker client binary not on PATH")
+			return
+		}
+
+		finder := find.NewFinder(c)
+		pool, _ := finder.ResourcePool(ctx, "DC0_H0/Resources")
+		dc, err := finder.Datacenter(ctx, "DC0")
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		content := "foo"
+		port := 8888
+
+		spec := types.VirtualMachineConfigSpec{
+			Name: "nginx-container-after-reconfig",
+			Files: &types.VirtualMachineFileInfo{
+				VmPathName: "[LocalDS_0] nginx",
+			},
+		}
+
+		f, _ := dc.Folders(ctx)
+		// Create a new VM
+		task, err := f.VmFolder.CreateVM(ctx, spec, pool, nil)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		info, err := task.WaitForResult(ctx, nil)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		vm := object.NewVirtualMachine(c, info.Result.(types.ManagedObjectReference))
+		// PowerOn the VM - no container is started yet; the backing is added via Reconfigure below
+		task, _ = vm.PowerOn(ctx)
+		err = task.Wait(ctx)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		spec2 := types.VirtualMachineConfigSpec{
+			ExtraConfig: constructNginxBacking(t, content, port),
+		}
+
+		task, err = vm.Reconfigure(ctx, spec2)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		info, err = task.WaitForResult(ctx, nil)
+		if err != nil {
+			log.Fatal(info, err)
+		}
+
+		err = validateNginxContainer(t, vm, content, port)
+		if err != nil {
+			log.Fatal(err)
+		}
+
+		// PowerOff stops the container
+		task, _ = vm.PowerOff(ctx)
+		_ = task.Wait(ctx)
+		// Destroy deletes the container
+		task, _ = vm.Destroy(ctx)
+		_ = task.Wait(ctx)
+	})
+	// Output: 0 diff
+}
diff --git a/simulator/esx/host_config_filesystemvolume.go b/simulator/esx/host_config_filesystemvolume.go
new file mode 100644
index 000000000..01c62d0a4
--- /dev/null
+++ b/simulator/esx/host_config_filesystemvolume.go
@@ -0,0 +1,144 @@
+/*
+Copyright (c) 2017-2023 VMware, Inc. All Rights Reserved.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package esx
+
+import (
+	"github.com/vmware/govmomi/units"
+	"github.com/vmware/govmomi/vim25/types"
+)
+
+// HostFileSystemVolumeInfo is the default template for the HostSystem config.fileSystemVolume property.
+// Capture method: +// govc object.collect -s -dump HostSystem:ha-host config.fileSystemVolume +// - slightly modified for uuids and DiskName +var HostFileSystemVolumeInfo = types.HostFileSystemVolumeInfo{ + VolumeTypeList: []string{"VMFS", "NFS", "NFS41", "vsan", "VVOL", "VFFS", "OTHER", "PMEM"}, + MountInfo: []types.HostFileSystemMountInfo{ + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000003", + AccessMode: "readWrite", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "VMFS", + Name: "datastore1", + Capacity: 3.5 * units.TB, + }, + BlockSizeMb: 1, + BlockSize: units.KB, + UnmapGranularity: units.KB, + UnmapPriority: "low", + UnmapBandwidthSpec: (*types.VmfsUnmapBandwidthSpec)(nil), + MaxBlocks: 61 * units.MB, + MajorVersion: 6, + Version: "6.82", + Uuid: "deadbeef-01234567-89ab-cdef00000003", + Extent: []types.HostScsiDiskPartition{ + { + DiskName: "____simulated_volumes_____", + Partition: 8, + }, + }, + VmfsUpgradable: false, + ForceMountedInfo: (*types.HostForceMountedInfo)(nil), + Ssd: types.NewBool(true), + Local: types.NewBool(true), + ScsiDiskType: "", + }, + VStorageSupport: "vStorageUnsupported", + }, + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000002", + AccessMode: "readWrite", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVmfsVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "OSDATA-deadbeef-01234567-89ab-cdef00000002", + Capacity: 128 * units.GB, + }, + BlockSizeMb: 1, + BlockSize: units.KB, + UnmapGranularity: 0, + UnmapPriority: "", + UnmapBandwidthSpec: (*types.VmfsUnmapBandwidthSpec)(nil), + MaxBlocks: 256 * units.KB, + MajorVersion: 1, + Version: "1.00", + Uuid: "deadbeef-01234567-89ab-cdef00000002", + Extent: []types.HostScsiDiskPartition{ + { + DiskName: "____simulated_volumes_____", + Partition: 7, + }, + }, + VmfsUpgradable: false, + ForceMountedInfo: (*types.HostForceMountedInfo)(nil), + Ssd: types.NewBool(true), + Local: types.NewBool(true), + ScsiDiskType: "", + }, + VStorageSupport: "vStorageUnsupported", + }, + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000001", + AccessMode: "readOnly", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK1", + Capacity: 4 * units.GB, + }, + }, + VStorageSupport: "", + }, + { + MountInfo: types.HostMountInfo{ + Path: "/vmfs/volumes/deadbeef-01234567-89ab-cdef00000000", + AccessMode: "readOnly", + Mounted: types.NewBool(true), + Accessible: types.NewBool(true), + InaccessibleReason: "", + MountFailedReason: "", + }, + Volume: &types.HostVfatVolume{ + HostFileSystemVolume: types.HostFileSystemVolume{ + Type: "OTHER", + Name: "BOOTBANK2", + Capacity: 4 * units.GB, + }, + }, + VStorageSupport: "", + }, + }, +} diff --git a/simulator/esx/host_config_info.go b/simulator/esx/host_config_info.go index ced194f95..d56c3b607 100644 --- a/simulator/esx/host_config_info.go +++ b/simulator/esx/host_config_info.go @@ -50,6 +50,7 @@ var HostConfigInfo = types.HostConfigInfo{ ConsoleReservation: 
(*types.ServiceConsoleReservationInfo)(nil), VirtualMachineReservation: (*types.VirtualMachineMemoryReservationInfo)(nil), StorageDevice: &HostStorageDeviceInfo, + FileSystemVolume: &HostFileSystemVolumeInfo, SystemFile: nil, Network: &types.HostNetworkInfo{ Vswitch: []types.HostVirtualSwitch{ diff --git a/simulator/esx/setting.go b/simulator/esx/setting.go index da5dca20f..54ec6ead0 100644 --- a/simulator/esx/setting.go +++ b/simulator/esx/setting.go @@ -21,14 +21,20 @@ import "github.com/vmware/govmomi/vim25/types" // HardwareVersion is the default VirtualMachine.Config.Version var HardwareVersion = "vmx-13" -// Setting is captured from ESX's HostSystem.configManager.advancedOption +// AdvancedOptions is captured from ESX's HostSystem.configManager.advancedOption // Capture method: // // govc object.collect -s -dump $(govc object.collect -s HostSystem:ha-host configManager.advancedOption) setting -var Setting = []types.BaseOptionValue{ +var AdvancedOptions = []types.BaseOptionValue{ // This list is currently pruned to include a single option for testing &types.OptionValue{ Key: "Config.HostAgent.log.level", Value: "info", }, } + +// Setting is captured from ESX's HostSystem.ServiceContent.setting +// Capture method: +// +// govc object.collect -s -dump OptionManager:HostAgentSettings setting +var Setting = []types.BaseOptionValue{} diff --git a/simulator/feature_test.go b/simulator/feature_test.go index 92fb76b78..ea817e052 100644 --- a/simulator/feature_test.go +++ b/simulator/feature_test.go @@ -26,6 +26,7 @@ import ( "os" "os/exec" "path/filepath" + "strings" "github.com/vmware/govmomi" "github.com/vmware/govmomi/find" @@ -121,11 +122,13 @@ func Example_runContainer() { }, ExtraConfig: []types.BaseOptionValue{ &types.OptionValue{Key: "RUN.container", Value: args}, // run nginx + &types.OptionValue{Key: "RUN.port.80", Value: "8888"}, // test port remap }, } // Create a new VM task, err := f.VmFolder.CreateVM(ctx, spec, pool, nil) + if err != nil { log.Fatal(err) } @@ -144,13 +147,25 @@ func Example_runContainer() { ip, _ := vm.WaitForIP(ctx, true) // Returns the docker container's IP - // Count the number of bytes in feature_test.go via nginx + // Count the number of bytes in feature_test.go via nginx going direct to the container cmd := exec.Command("docker", "run", "--rm", "curlimages/curl", "curl", "-f", fmt.Sprintf("http://%s", ip)) var buf bytes.Buffer cmd.Stdout = &buf err = cmd.Run() - if err != nil { - log.Fatal(err) + res := buf.String() + + if err != nil || strings.TrimSpace(res) != fcontent { + log.Fatal(err, buf.String()) + } + + // Count the number of bytes in feature_test.go via nginx going via port remap on host + cmd = exec.Command("curl", "-f", "http://localhost:8888") + buf.Reset() + cmd.Stdout = &buf + err = cmd.Run() + res = buf.String() + if err != nil || strings.TrimSpace(res) != fcontent { + log.Fatal(err, buf.String()) } // PowerOff stops the container diff --git a/simulator/guest_operations_manager.go b/simulator/guest_operations_manager.go index 780f44a04..f05883580 100644 --- a/simulator/guest_operations_manager.go +++ b/simulator/guest_operations_manager.go @@ -69,7 +69,7 @@ func guestURL(ctx *Context, vm *VirtualMachine, path string) string { Host: "*", // See guest.FileManager.TransferURL Path: guestPrefix + strings.TrimPrefix(path, "/"), RawQuery: url.Values{ - "id": []string{vm.run.id}, + "id": []string{vm.svm.c.id}, "token": []string{ctx.Session.Key}, }.Encode(), }).String() @@ -79,7 +79,7 @@ func (m *GuestFileManager) InitiateFileTransferToGuest(ctx 
*Context, req *types. body := new(methods.InitiateFileTransferToGuestBody) vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - err := vm.run.prepareGuestOperation(vm, req.Auth) + err := vm.svm.prepareGuestOperation(req.Auth) if err != nil { body.Fault_ = Fault("", err) return body @@ -96,7 +96,7 @@ func (m *GuestFileManager) InitiateFileTransferFromGuest(ctx *Context, req *type body := new(methods.InitiateFileTransferFromGuestBody) vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - err := vm.run.prepareGuestOperation(vm, req.Auth) + err := vm.svm.prepareGuestOperation(req.Auth) if err != nil { body.Fault_ = Fault("", err) return body @@ -126,7 +126,7 @@ func (m *GuestProcessManager) StartProgramInGuest(ctx *Context, req *types.Start vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - fault := vm.run.prepareGuestOperation(vm, auth) + fault := vm.svm.prepareGuestOperation(auth) if fault != nil { body.Fault_ = Fault("", fault) } @@ -141,7 +141,7 @@ func (m *GuestProcessManager) StartProgramInGuest(ctx *Context, req *types.Start args = append(args, "-e", e) } - args = append(args, vm.run.id, spec.ProgramPath, spec.Arguments) + args = append(args, vm.svm.c.id, spec.ProgramPath, spec.Arguments) spec.ProgramPath = "docker" spec.Arguments = strings.Join(args, " ") @@ -213,7 +213,7 @@ func (m *GuestFileManager) mktemp(ctx *Context, req *types.CreateTemporaryFileIn vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - return vm.run.exec(ctx, vm, req.Auth, args) + return vm.svm.exec(ctx, req.Auth, args) } func (m *GuestFileManager) CreateTemporaryFileInGuest(ctx *Context, req *types.CreateTemporaryFileInGuest) soap.HasFault { @@ -298,7 +298,7 @@ func (m *GuestFileManager) ListFilesInGuest(ctx *Context, req *types.ListFilesIn return body } - res, fault := vm.run.exec(ctx, vm, req.Auth, listFiles(req)) + res, fault := vm.svm.exec(ctx, req.Auth, listFiles(req)) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -317,7 +317,7 @@ func (m *GuestFileManager) DeleteFileInGuest(ctx *Context, req *types.DeleteFile vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -338,7 +338,7 @@ func (m *GuestFileManager) DeleteDirectoryInGuest(ctx *Context, req *types.Delet vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -359,7 +359,7 @@ func (m *GuestFileManager) MakeDirectoryInGuest(ctx *Context, req *types.MakeDir vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -381,7 +381,7 @@ func (m *GuestFileManager) MoveFileInGuest(ctx *Context, req *types.MoveFileInGu vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -399,7 +399,7 @@ func (m *GuestFileManager) MoveDirectoryInGuest(ctx *Context, req *types.MoveDir vm := ctx.Map.Get(req.Vm).(*VirtualMachine) - _, fault := vm.run.exec(ctx, vm, req.Auth, args) + _, fault := vm.svm.exec(ctx, req.Auth, args) if fault != nil { body.Fault_ = Fault("", fault) return body @@ -424,7 +424,7 @@ func (m *GuestFileManager) ChangeFileAttributesInGuest(ctx *Context, req *types. 
 		if attr.Permissions != 0 {
 			args := []string{"chmod", fmt.Sprintf("%#o", attr.Permissions), req.GuestFilePath}
-			_, fault := vm.run.exec(ctx, vm, req.Auth, args)
+			_, fault := vm.svm.exec(ctx, req.Auth, args)
 			if fault != nil {
 				body.Fault_ = Fault("", fault)
 				return body
@@ -443,7 +443,7 @@ func (m *GuestFileManager) ChangeFileAttributesInGuest(ctx *Context, req *types.
 		if c.id != nil {
 			args := []string{c.cmd, fmt.Sprintf("%d", *c.id), req.GuestFilePath}
-			_, fault := vm.run.exec(ctx, vm, req.Auth, args)
+			_, fault := vm.svm.exec(ctx, req.Auth, args)
 			if fault != nil {
 				body.Fault_ = Fault("", fault)
 				return body
diff --git a/simulator/host_system.go b/simulator/host_system.go
index f28101a8c..f8cd3fe7c 100644
--- a/simulator/host_system.go
+++ b/simulator/host_system.go
@@ -17,8 +17,10 @@ limitations under the License.
 package simulator
 
 import (
+	"fmt"
 	"net"
 	"os"
+	"sync"
 	"time"
 
 	"github.com/vmware/govmomi/simulator/esx"
@@ -30,10 +32,16 @@ import (
 
 var (
 	hostPortUnique = os.Getenv("VCSIM_HOST_PORT_UNIQUE") == "true"
+
+	globalLock sync.Mutex
+	// globalHostCount is used to construct unique hostnames. Should be consumed under globalLock.
+	globalHostCount = 0
 )
 
 type HostSystem struct {
 	mo.HostSystem
+
+	sh *simHost
 }
 
 func asHostSystemMO(obj mo.Reference) (*mo.HostSystem, bool) {
@@ -72,13 +80,23 @@ func NewHostSystem(host mo.HostSystem) *HostSystem {
 	deepCopy(hs.Config, cfg)
 	hs.Config = cfg
 
+	// copy over the reference advanced options so each host can have its own, allowing hosts to be configured for
+	// container backing individually
+	deepCopy(esx.AdvancedOptions, &cfg.Option)
+
+	// add a supported option to the AdvancedOption manager
+	simOption := types.OptionDef{ElementDescription: types.ElementDescription{Key: advOptContainerBackingImage}}
+	// TODO: how do we enter patterns here? Or should we stick to a list in the value?
+	// patterns become necessary if we want to enforce correctness on options for RUN.underlay. or allow RUN.port.xxx
+	hs.Config.OptionDef = append(hs.Config.OptionDef, simOption)
+
 	config := []struct {
 		ref **types.ManagedObjectReference
 		obj mo.Reference
 	}{
 		{&hs.ConfigManager.DatastoreSystem, &HostDatastoreSystem{Host: &hs.HostSystem}},
 		{&hs.ConfigManager.NetworkSystem, NewHostNetworkSystem(&hs.HostSystem)},
-		{&hs.ConfigManager.AdvancedOption, NewOptionManager(nil, esx.Setting)},
+		{&hs.ConfigManager.AdvancedOption, NewOptionManager(nil, nil, &hs.Config.Option)},
 		{&hs.ConfigManager.FirewallSystem, NewHostFirewallSystem(&hs.HostSystem)},
 		{&hs.ConfigManager.StorageSystem, NewHostStorageSystem(&hs.HostSystem)},
 	}
@@ -92,12 +110,23 @@ func NewHostSystem(host mo.HostSystem) *HostSystem {
 	return hs
 }
 
-func (h *HostSystem) configure(spec types.HostConnectSpec, connected bool) {
+func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connected bool) {
 	h.Runtime.ConnectionState = types.HostSystemConnectionStateDisconnected
 	if connected {
 		h.Runtime.ConnectionState = types.HostSystemConnectionStateConnected
 	}
-	if net.ParseIP(spec.HostName) != nil {
+
+	// lets us construct a non-conflicting hostname automatically if one was omitted
+	// a global count is used rather than the unique port to avoid constraints on port values, such as being >1024
+
+	globalLock.Lock()
+	instanceID := globalHostCount
+	globalHostCount++
+	globalLock.Unlock()
+
+	if spec.HostName == "" {
+		spec.HostName = fmt.Sprintf("esx-%d", instanceID)
+	} else if net.ParseIP(spec.HostName) != nil {
 		h.Config.Network.Vnic[0].Spec.Ip.IpAddress = spec.HostName
 	}
 
@@ -106,6 +135,241 @@ func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connect
 	id := newUUID(h.Name)
 	h.Summary.Hardware.Uuid = id
 	h.Hardware.SystemInfo.Uuid = id
+
+	var err error
+	h.sh, err = createSimulationHost(ctx, h)
+	if err != nil {
+		panic("failed to create simulation host and no path to return error: " + err.Error())
+	}
+}
+
+// configureContainerBacking sets up _this_ host for simulation using a container backing.
+// Args:
+//
+//	image - the container image with which to simulate the host
+//	mounts - array of mount info that should be translated into /vmfs/volumes/... mounts backed by container volumes
+//	networks - names of bridges to use for underlays. Will create a pNIC for each. The first will be treated as the management network.
+//
+// Restrictions adopted from createSimulationHost:
+// * no mock of VLAN connectivity
+// * only a single vmknic, used for "the management IP"
+// * pNIC connectivity does not directly impact VMs/vmks using it as uplink
+//
+// The pNICs will be named using the standard pattern, ie. vmnic0, vmnic1, ...
+// This will sanity check the NetConfig for "management" nicType to ensure that it maps through PortGroup->vSwitch->pNIC to vmnic0.
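+//
+// A minimal usage sketch, following TestHostContainerBacking:
+//
+//	hs := NewHostSystem(esx.HostSystem)
+//	_ = hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes, "vcsim-mgmt-underlay")
+//	hs.configure(ctx, types.HostConnectSpec{}, true) // creates the backing container and populates the management IP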
+func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mounts []types.HostFileSystemMountInfo, networks ...string) error {
+	option := &types.OptionValue{
+		Key:   advOptContainerBackingImage,
+		Value: image,
+	}
+
+	advOpts := ctx.Map.Get(h.ConfigManager.AdvancedOption.Reference()).(*OptionManager)
+	fault := advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault()
+	if fault != nil {
+		panic(fault)
+	}
+
+	h.Config.FileSystemVolume = nil
+	if mounts != nil {
+		h.Config.FileSystemVolume = &types.HostFileSystemVolumeInfo{
+			VolumeTypeList: []string{"VMFS", "OTHER"},
+			MountInfo:      mounts,
+		}
+	}
+
+	// force at least a management network
+	if len(networks) == 0 {
+		networks = []string{defaultUnderlayBridgeName}
+	}
+
+	// purge pNICs from the template - it makes no sense to keep them for a sim host
+	h.Config.Network.Pnic = make([]types.PhysicalNic, len(networks))
+
+	// purge any IPs and MACs associated with existing NetConfigs for the host
+	for cfgIdx := range h.Config.VirtualNicManagerInfo.NetConfig {
+		config := &h.Config.VirtualNicManagerInfo.NetConfig[cfgIdx]
+		for candidateIdx := range config.CandidateVnic {
+			candidate := &config.CandidateVnic[candidateIdx]
+			candidate.Spec.Ip.IpAddress = "0.0.0.0"
+			candidate.Spec.Ip.SubnetMask = "0.0.0.0"
+			candidate.Spec.Mac = "00:00:00:00:00:00"
+		}
+	}
+
+	// The presence of a pNIC is used to indicate connectivity to a specific underlay. We construct an empty pNIC entry and specify the underlay via
+	// host.ConfigManager.AdvancedOptions. The pNIC will be populated with the MAC (accurate) and IP (divergence - we need to stash it somewhere) for the veth.
+	// We create a NetConfig "management" entry for the first pNIC - this will be populated with the IP of the "host" container.
+
+	// create a pNIC for each underlay
+	for i, net := range networks {
+		name := fmt.Sprintf("vmnic%d", i)
+
+		// we don't have a natural field for annotating which pNIC is connected to which network, so stash it in an adv option.
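+		// e.g. with networks of ("vcsim-mgmt-underlay") this stores an option mapping vmnic0 to that bridge name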
+		option := &types.OptionValue{
+			Key:   advOptPrefixPnicToUnderlayPrefix + name,
+			Value: net,
+		}
+		fault = advOpts.UpdateOptions(&types.UpdateOptions{ChangedValue: []types.BaseOptionValue{option}}).Fault()
+		if fault != nil {
+			panic(fault)
+		}
+
+		h.Config.Network.Pnic[i] = types.PhysicalNic{
+			Key:             "key-vim.host.PhysicalNic-" + name,
+			Device:          name,
+			Pci:             fmt.Sprintf("0000:%2d:00.0", i+1),
+			Driver:          "vcsim-bridge",
+			DriverVersion:   "1.2.10.0",
+			FirmwareVersion: "1.57, 0x80000185",
+			LinkSpeed: &types.PhysicalNicLinkInfo{
+				SpeedMb: 10000,
+				Duplex:  true,
+			},
+			ValidLinkSpecification: []types.PhysicalNicLinkInfo{
+				{
+					SpeedMb: 10000,
+					Duplex:  true,
+				},
+			},
+			Spec: types.PhysicalNicSpec{
+				Ip:                            &types.HostIpConfig{},
+				LinkSpeed:                     (*types.PhysicalNicLinkInfo)(nil),
+				EnableEnhancedNetworkingStack: types.NewBool(false),
+				EnsInterruptEnabled:           types.NewBool(false),
+			},
+			WakeOnLanSupported: false,
+			Mac:                "00:00:00:00:00:00",
+			FcoeConfiguration: &types.FcoeConfig{
+				PriorityClass: 3,
+				SourceMac:     "00:00:00:00:00:00",
+				VlanRange: []types.FcoeConfigVlanRange{
+					{},
+				},
+				Capabilities: types.FcoeConfigFcoeCapabilities{},
+				FcoeActive:   false,
+			},
+			VmDirectPathGen2Supported:             types.NewBool(false),
+			VmDirectPathGen2SupportedMode:         "",
+			ResourcePoolSchedulerAllowed:          types.NewBool(false),
+			ResourcePoolSchedulerDisallowedReason: nil,
+			AutoNegotiateSupported:                types.NewBool(true),
+			EnhancedNetworkingStackSupported:      types.NewBool(false),
+			EnsInterruptSupported:                 types.NewBool(false),
+			RdmaDevice:                            "",
+			DpuId:                                 "",
+		}
+	}
+
+	// sanity check that everything's hung together sufficiently well
+	details, err := h.getNetConfigInterface(ctx, "management")
+	if err != nil {
+		return err
+	}
+
+	if details.uplink == nil || details.uplink.Device != "vmnic0" {
+		return fmt.Errorf("Config provided for host %s does not result in a consistent 'management' NetConfig that's bound to 'vmnic0'", h.Name)
+	}
+
+	return nil
+}
+
+// netConfigDetails is used to package up all the related network entities associated with a NetConfig binding
+type netConfigDetails struct {
+	nicType   string
+	netconfig *types.VirtualNicManagerNetConfig
+	vmk       *types.HostVirtualNic
+	netstack  *types.HostNetStackInstance
+	portgroup *types.HostPortGroup
+	vswitch   *types.HostVirtualSwitch
+	uplink    *types.PhysicalNic
+}
+
+// getNetConfigInterface returns the set of constructs active for a given nicType (e.g. "management", "vmotion").
+// This method is provided because the Config structure held by HostSystem is heavily interconnected but serialized and not cross-linked with pointers.
+// As such there's a _lot_ of cross-referencing that needs to be done to navigate.
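+// The chain walked is:
+//
+//	NetConfig(nicType) -> SelectedVnic -> CandidateVnic -> Portgroup (matched by Spec.Name) -> Vswitch -> Pnic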
+// The pNIC returned is the uplink associated with the vSwitch for the netconfig
+func (h *HostSystem) getNetConfigInterface(ctx *Context, nicType string) (*netConfigDetails, error) {
+	details := &netConfigDetails{
+		nicType: nicType,
+	}
+
+	for i := range h.Config.VirtualNicManagerInfo.NetConfig {
+		if h.Config.VirtualNicManagerInfo.NetConfig[i].NicType == nicType {
+			details.netconfig = &h.Config.VirtualNicManagerInfo.NetConfig[i]
+			break
+		}
+	}
+	if details.netconfig == nil {
+		return nil, fmt.Errorf("no matching NetConfig for NicType=%s", nicType)
+	}
+
+	if details.netconfig.SelectedVnic == nil {
+		return details, nil
+	}
+
+	vnicKey := details.netconfig.SelectedVnic[0]
+	for i := range details.netconfig.CandidateVnic {
+		if details.netconfig.CandidateVnic[i].Key == vnicKey {
+			details.vmk = &details.netconfig.CandidateVnic[i]
+			break
+		}
+	}
+	if details.vmk == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent vNIC key %s for %s nicType", h.Name, vnicKey, nicType))
+	}
+
+	portgroupName := details.vmk.Portgroup
+	netstackKey := details.vmk.Spec.NetStackInstanceKey
+
+	for i := range h.Config.Network.NetStackInstance {
+		if h.Config.Network.NetStackInstance[i].Key == netstackKey {
+			details.netstack = &h.Config.Network.NetStackInstance[i]
+			break
+		}
+	}
+	if details.netstack == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent NetStack key %s for %s nicType", h.Name, netstackKey, nicType))
+	}
+
+	for i := range h.Config.Network.Portgroup {
+		// TODO: confirm correctness of this - seems weird it references the Spec.Name instead of the key like everything else.
+		if h.Config.Network.Portgroup[i].Spec.Name == portgroupName {
+			details.portgroup = &h.Config.Network.Portgroup[i]
+			break
+		}
+	}
+	if details.portgroup == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent PortGroup name %s for %s nicType", h.Name, portgroupName, nicType))
+	}
+
+	vswitchKey := details.portgroup.Vswitch
+	for i := range h.Config.Network.Vswitch {
+		if h.Config.Network.Vswitch[i].Key == vswitchKey {
+			details.vswitch = &h.Config.Network.Vswitch[i]
+			break
+		}
+	}
+	if details.vswitch == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent vSwitch key %s for %s nicType", h.Name, vswitchKey, nicType))
+	}
+
+	if len(details.vswitch.Pnic) != 1 {
+		// to change this, look at the Active NIC in the NicTeamingPolicy, but for now not worth it
+		panic(fmt.Sprintf("vSwitch %s for host %s has multiple pNICs associated which is not supported.", vswitchKey, h.Name))
+	}
+
+	pnicKey := details.vswitch.Pnic[0]
+	for i := range h.Config.Network.Pnic {
+		if h.Config.Network.Pnic[i].Key == pnicKey {
+			details.uplink = &h.Config.Network.Pnic[i]
+			break
+		}
+	}
+	if details.uplink == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent pNIC key %s for %s nicType", h.Name, pnicKey, nicType))
+	}
+
+	return details, nil
 }
 
 func (h *HostSystem) event() types.HostEvent {
@@ -207,7 +471,7 @@ func CreateStandaloneHost(ctx *Context, f *Folder, spec types.HostConnectSpec) (
 	pool := NewResourcePool()
 	host := NewHostSystem(template)
-	host.configure(spec, false)
+	host.configure(ctx, spec, false)
 
 	summary := new(types.ComputeResourceSummary)
 	addComputeResource(summary, host)
@@ -247,6 +511,17 @@ func (h *HostSystem) DestroyTask(ctx *Context, req *types.Destroy_Task) soap.Has
 	f := ctx.Map.getEntityParent(h, "Folder").(*Folder)
 	folderRemoveChild(ctx, &f.Folder, h.Reference())
+	err := h.sh.remove(ctx)
+
+	if err != nil {
+		return nil, &types.RuntimeFault{
+			MethodFault: types.MethodFault{
+				FaultCause: &types.LocalizedMethodFault{
+					Fault:            &types.SystemErrorFault{Reason: err.Error()},
+					LocalizedMessage: err.Error()}}}
+	}
+
+	// TODO: should there be events on lifecycle operations as with VMs?
 
 	return nil, nil
 })
diff --git a/simulator/host_system_test.go b/simulator/host_system_test.go
index ffd43802f..99e61b2c4 100644
--- a/simulator/host_system_test.go
+++ b/simulator/host_system_test.go
@@ -20,6 +20,8 @@ import (
 	"context"
 	"testing"
 
+	"github.com/stretchr/testify/assert"
+
 	"github.com/vmware/govmomi"
 	"github.com/vmware/govmomi/find"
 	"github.com/vmware/govmomi/object"
@@ -127,9 +129,9 @@ func TestNewHostSystem(t *testing.T) {
 	}
 
 	hs := NewHostSystem(esx.HostSystem)
-	if hs.Summary.Runtime != &hs.Runtime {
-		t.Fatal("expected hs.Summary.Runtime == &hs.Runtime; got !=")
-	}
+
+	assert.Equal(t, &hs.Runtime, hs.Summary.Runtime, "expected pointer to runtime in summary")
+	assert.False(t, esx.AdvancedOptions[0] == hs.Config.Option[0], "expected each host to have its own advanced options")
 }
 
 func TestDestroyHostSystem(t *testing.T) {
diff --git a/simulator/model.go b/simulator/model.go
index b3da2900c..99d7625e1 100644
--- a/simulator/model.go
+++ b/simulator/model.go
@@ -492,7 +492,7 @@ func (m *Model) Create() error {
 	// 1 NIC per VM, backed by a DVPG if Model.Portgroup > 0
 	vmnet := esx.EthernetCard.Backing
 
-	// addHost adds a cluster host or a stanalone host.
+	// addHost adds a cluster host or a standalone host.
 	addHost := func(name string, f func(types.HostConnectSpec) (*object.Task, error)) (*object.HostSystem, error) {
 		spec := types.HostConnectSpec{
 			HostName: name,
@@ -855,7 +855,7 @@ func (m *Model) Remove() {
 	Map.m.Lock()
 	for _, obj := range Map.objects {
 		if vm, ok := obj.(*VirtualMachine); ok {
-			vm.run.remove(vm)
+			vm.svm.remove(SpoofContext())
 		}
 	}
 	Map.m.Unlock()
diff --git a/simulator/option_manager.go b/simulator/option_manager.go
index efcdee215..1dd1688cd 100644
--- a/simulator/option_manager.go
+++ b/simulator/option_manager.go
@@ -28,19 +28,45 @@ import (
 	"github.com/vmware/govmomi/vim25/types"
 )
 
+// OptionManager is used in at least two locations for ESX:
+// 1. ServiceContent.setting - this is empty on ESX and //TODO on VC
+// 2. ConfigManager.advancedOption - this is where the bulk of the ESX settings are found
 type OptionManager struct {
 	mo.OptionManager
+
+	// mirror is an array to keep in sync with OptionManager.Settings. Necessary because we use append.
+	// uni-directional - changes made to the mirrored array are not reflected back to Settings
+	mirror *[]types.BaseOptionValue
+}
+
+func asOptionManager(ctx *Context, obj mo.Reference) (*OptionManager, bool) {
+	om, ok := ctx.Map.Get(obj.Reference()).(*OptionManager)
+	return om, ok
 }
 
-func NewOptionManager(ref *types.ManagedObjectReference, setting []types.BaseOptionValue) object.Reference {
+// NewOptionManager constructs the type. If mirror is non-nil it takes precedence over setting, and setting is ignored.
+// Args:
+// - ref - used to set OptionManager.Self if non-nil
+// - setting - initial options, may be nil.
+// - mirror - options array to keep updated with the OptionManager.Settings, may be nil.
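+//
+// For example, host_system.go wires each host's advanced options with:
+//
+//	NewOptionManager(nil, nil, &hs.Config.Option)
+//
+// so appends to Settings are reflected into hs.Config.Option.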
+func NewOptionManager(ref *types.ManagedObjectReference, setting []types.BaseOptionValue, mirror *[]types.BaseOptionValue) object.Reference {
 	s := &OptionManager{}
+
+	s.Setting = setting
+	if mirror != nil {
+		s.mirror = mirror
+		s.Setting = *mirror
+	}
+
 	if ref != nil {
 		s.Self = *ref
 	}
-	s.Setting = setting
+
 	return s
 }
 
+// init constructs the OptionManager for ServiceContent.setting from the template directories.
+// This does _not_ construct the OptionManager for ConfigManager.advancedOption.
 func (m *OptionManager) init(r *Registry) {
 	if len(m.Setting) == 0 {
 		if r.IsVPX() {
@@ -103,6 +129,9 @@ func (m *OptionManager) UpdateOptions(req *types.UpdateOptions) soap.HasFault {
 		}
 
 		m.Setting = append(m.Setting, change)
+		if m.mirror != nil {
+			*m.mirror = m.Setting
+		}
 	}
 
 	body.Res = new(types.UpdateOptionsResponse)
diff --git a/simulator/virtual_machine.go b/simulator/virtual_machine.go
index 524b6e24c..d6671edff 100644
--- a/simulator/virtual_machine.go
+++ b/simulator/virtual_machine.go
@@ -46,7 +46,7 @@ type VirtualMachine struct {
 	log string
 	sid int32
 
-	run container
+	svm *simVM
 	uid uuid.UUID
 	imc *types.CustomizationSpec
 }
@@ -394,7 +394,8 @@ func extraConfigKey(key string) string {
 	return key
 }
 
-func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec) {
+func (vm *VirtualMachine) applyExtraConfig(ctx *Context, spec *types.VirtualMachineConfigSpec) types.BaseMethodFault {
+	var removedContainerBacking bool
 	var changes []types.PropertyChange
 	for _, c := range spec.ExtraConfig {
 		val := c.GetOptionValue()
@@ -419,6 +420,9 @@ func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec)
 			vm.Config.ExtraConfig = append(vm.Config.ExtraConfig, c)
 		} else {
 			if s, ok := val.Value.(string); ok && s == "" {
+				if key == ContainerBackingOptionKey {
+					removedContainerBacking = true
+				}
 				// Remove existing element
 				l := len(vm.Config.ExtraConfig)
 				vm.Config.ExtraConfig[keyIndex] = vm.Config.ExtraConfig[l-1]
@@ -450,9 +454,52 @@ func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec)
 		)
 	}
 
+	// create the container backing before we publish the updates so the simVM is available before handlers
+	// get triggered
+	var fault types.BaseMethodFault
+	if vm.svm == nil {
+		vm.svm = createSimulationVM(vm)
+
+		// check to see if the VM is already powered on - if so we need to retroactively hit that path here
+		if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn {
+			err := vm.svm.start(ctx)
+			if err != nil {
+				// don't attempt to undo the changes already made - just return an error
+				// we'll retry the svm.start operation on pause/restart calls
+				fault = &types.VAppConfigFault{
+					VimFault: types.VimFault{
+						MethodFault: types.MethodFault{
+							FaultCause: &types.LocalizedMethodFault{
+								Fault:            &types.SystemErrorFault{Reason: err.Error()},
+								LocalizedMessage: err.Error()}}}}
+			}
+		}
+	} else if removedContainerBacking {
+		err := vm.svm.remove(ctx)
+		if err == nil {
+			// remove link from container to VM so callbacks no longer reflect state
+			vm.svm.vm = nil
+			// nil container backing reference to return this to a pure in-mem simulated VM
+			vm.svm = nil
+		} else {
+			// don't attempt to undo the changes already made - just return an error
+			// we'll retry the svm.remove operation on pause/restart calls
+			fault = &types.VAppConfigFault{
+				VimFault: types.VimFault{
+					MethodFault: types.MethodFault{
+						FaultCause: &types.LocalizedMethodFault{
+							Fault:            &types.SystemErrorFault{Reason: err.Error()},
+							LocalizedMessage: err.Error()}}}}
+		}
+	}
+
+	
if len(changes) != 0 { Map.Update(vm, changes) } + + return fault } func validateGuestID(id string) types.BaseMethodFault { @@ -1495,6 +1542,7 @@ func (vm *VirtualMachine) genVmdkPath(p object.DatastorePath) (string, types.Bas func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMachineConfigSpec) types.BaseMethodFault { devices := object.VirtualDeviceList(vm.Config.Hardware.Device) + var err types.BaseMethodFault for i, change := range spec.DeviceChange { dspec := change.GetVirtualDeviceConfigSpec() device := dspec.Device.GetVirtualDevice() @@ -1531,7 +1579,7 @@ func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMach } key := device.Key - err := vm.configureDevice(ctx, devices, dspec, nil) + err = vm.configureDevice(ctx, devices, dspec, nil) if err != nil { return err } @@ -1558,7 +1606,7 @@ func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMach device.DeviceInfo.GetDescription().Summary = "" // regenerate summary } - err := vm.configureDevice(ctx, devices, dspec, oldDevice) + err = vm.configureDevice(ctx, devices, dspec, oldDevice) if err != nil { return err } @@ -1573,9 +1621,16 @@ func (vm *VirtualMachine) configureDevices(ctx *Context, spec *types.VirtualMach {Name: "config.hardware.device", Val: []types.BaseVirtualDevice(devices)}, }) - vm.updateDiskLayouts() + err = vm.updateDiskLayouts() + if err != nil { + return err + } - vm.applyExtraConfig(spec) // Do this after device config, as some may apply to the devices themselves (e.g. ethernet -> guest.net) + // Do this after device config, as some may apply to the devices themselves (e.g. ethernet -> guest.net) + err = vm.applyExtraConfig(ctx, spec) + if err != nil { + return err + } return nil } @@ -1610,14 +1665,23 @@ func (c *powerVMTask) Run(task *Task) (types.AnyType, types.BaseMethodFault) { return nil, new(types.InvalidState) } - c.run.start(c.ctx, c.VirtualMachine) + err := c.svm.start(c.ctx) + if err != nil { + return nil, &types.MissingPowerOnConfiguration{ + VAppConfigFault: types.VAppConfigFault{ + VimFault: types.VimFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}}}} + } c.ctx.postEvent( &types.VmStartingEvent{VmEvent: event}, &types.VmPoweredOnEvent{VmEvent: event}, ) c.customize(c.ctx) case types.VirtualMachinePowerStatePoweredOff: - c.run.stop(c.ctx, c.VirtualMachine) + c.svm.stop(c.ctx) c.ctx.postEvent( &types.VmStoppingEvent{VmEvent: event}, &types.VmPoweredOffEvent{VmEvent: event}, @@ -1630,7 +1694,7 @@ func (c *powerVMTask) Run(task *Task) (types.AnyType, types.BaseMethodFault) { } } - c.run.pause(c.ctx, c.VirtualMachine) + c.svm.pause(c.ctx) c.ctx.postEvent( &types.VmSuspendingEvent{VmEvent: event}, &types.VmSuspendedEvent{VmEvent: event}, @@ -1737,7 +1801,7 @@ func (vm *VirtualMachine) RebootGuest(ctx *Context, req *types.RebootGuest) soap } if vm.Guest.ToolsRunningStatus == string(types.VirtualMachineToolsRunningStatusGuestToolsRunning) { - vm.run.restart(ctx, vm) + vm.svm.restart(ctx) body.Res = new(types.RebootGuestResponse) } else { body.Fault_ = Fault("", new(types.ToolsUnavailable)) @@ -1801,6 +1865,7 @@ func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soa task := CreateTask(vm, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) { if dc == nil { return nil, &types.ManagedObjectNotFound{Obj: vm.Self} // If our Parent was destroyed, so were we. 
+ // TODO: should this also trigger container removal? } r := vm.UnregisterVM(ctx, &types.UnregisterVM{ @@ -1825,7 +1890,14 @@ func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soa Datacenter: &dc.Self, }) - vm.run.remove(vm) + err := vm.svm.remove(ctx) + if err != nil { + return nil, &types.RuntimeFault{ + MethodFault: types.MethodFault{ + FaultCause: &types.LocalizedMethodFault{ + Fault: &types.SystemErrorFault{Reason: err.Error()}, + LocalizedMessage: err.Error()}}} + } return nil, nil }) @@ -2321,7 +2393,7 @@ func (vm *VirtualMachine) ShutdownGuest(ctx *Context, c *types.ShutdownGuest) so ctx.postEvent(&types.VmGuestShutdownEvent{VmEvent: event}) _ = CreateTask(vm, "shutdownGuest", func(*Task) (types.AnyType, types.BaseMethodFault) { - vm.run.stop(ctx, vm) + vm.svm.stop(ctx) ctx.Map.Update(vm, []types.PropertyChange{ {Name: "runtime.powerState", Val: types.VirtualMachinePowerStatePoweredOff}, @@ -2354,7 +2426,7 @@ func (vm *VirtualMachine) StandbyGuest(ctx *Context, c *types.StandbyGuest) soap ctx.postEvent(&types.VmGuestStandbyEvent{VmEvent: event}) _ = CreateTask(vm, "standbyGuest", func(*Task) (types.AnyType, types.BaseMethodFault) { - vm.run.pause(ctx, vm) + vm.svm.pause(ctx) ctx.Map.Update(vm, []types.PropertyChange{ {Name: "runtime.powerState", Val: types.VirtualMachinePowerStateSuspended}, diff --git a/simulator/vpx/setting.go b/simulator/vpx/setting.go index 7bbf0c02d..7625824da 100644 --- a/simulator/vpx/setting.go +++ b/simulator/vpx/setting.go @@ -18,6 +18,8 @@ package vpx import "github.com/vmware/govmomi/vim25/types" +// TODO: figure out whether this is Setting or AdvancedOptions - see esx/setting.go for the difference + // Setting is captured from VC's ServiceContent.OptionManager.setting var Setting = []types.BaseOptionValue{ // This list is currently pruned to include sso options only with sso.enabled set to false