From 9e5a31ccaba03e6f7a934fc03c2bbc4d0e501e07 Mon Sep 17 00:00:00 2001
From: George Hicken
Date: Wed, 12 Jul 2023 18:44:39 +0000
Subject: [PATCH] WIP: Create underlay networks for simhost pNICs

This uses an existing bridge or creates a new one as needed for simhost
pNICs. It does NOT currently rework the config for the mgmt vmk to hold
the IP address assigned to the container.

Next steps:
* figure out how the mgmt NetConfig gets constructed and associated with
  the pNIC. What parts of the config passed in do we preserve vs discard?
* update container_host_system to inject the IP into the appropriate
  locations once available
---
 simulator/container.go                  |  66 ++++++
 simulator/container_host_system.go      | 174 +++++++++++-----
 simulator/container_host_system_test.go |   2 +-
 simulator/host_system.go                | 257 ++++++++++++------------
 4 files changed, 321 insertions(+), 178 deletions(-)

diff --git a/simulator/container.go b/simulator/container.go
index 33ebe5ca4..e79df1db9 100644
--- a/simulator/container.go
+++ b/simulator/container.go
@@ -24,6 +24,7 @@ import (
 	"fmt"
 	"io"
 	"log"
+	"net"
 	"os"
 	"os/exec"
 	"path"
@@ -38,6 +39,7 @@ var (
 
 const (
 	deleteWithContainer = "lifecycle=container"
+	createdByVcsim      = "createdBy=vcsim"
 )
 
 func init() {
@@ -97,6 +99,11 @@ func extractNameAndUid(containerName string) (name string, uid string, err error
 	return parts[0], parts[1], nil
 }
 
+func prefixToMask(prefix int) string {
+	mask := net.CIDRMask(prefix, 32)
+	return fmt.Sprintf("%d.%d.%d.%d", mask[0], mask[1], mask[2], mask[3])
+}
+
 type tarEntry struct {
 	header  *tar.Header
 	content []byte
@@ -248,6 +255,65 @@ func createVolume(volumeName string, labels []string, files []tarEntry) (string,
 	return uid, err
 }
 
+// createBridge creates a bridge network if one does not already exist.
+// Returns:
+//
+//	uid - string
+//	err - error or nil
+func createBridge(bridgeName string, labels ...string) (string, error) {
+
+	// {"CreatedAt":"2023-07-11 19:22:25.45027052 +0000 UTC","Driver":"bridge","ID":"fe52c7502c5d","IPv6":"false","Internal":"false","Labels":"goodbye=,hello=","Name":"testnet","Scope":"local"}
+	type bridgeNet struct {
+		CreatedAt string
+		Driver    string
+		ID        string
+		IPv6      string
+		Internal  string
+		Labels    string
+		Name      string
+		Scope     string
+	}
+
+	// if the underlay bridge already exists, return that
+	// we don't check for a specific label or similar so that it's possible to use a bridge created by other frameworks for composite testing
+	var bridge bridgeNet
+	cmd := exec.Command("docker", "network", "ls", "--format", "json", "-f", fmt.Sprintf("name=%s$", bridgeName))
+	out, err := cmd.Output()
+	if err != nil {
+		log.Printf("vcsim %s: %s", cmd.Args, err)
+	}
+
+	// unfortunately docker returns an empty string, not an empty json doc
+	if len(out) != 0 {
+		err = json.Unmarshal(out, &bridge)
+		if err != nil {
+			log.Printf("vcsim %s: %s", cmd.Args, err)
+			return "", err
+		}
+
+		return bridge.ID, nil
+	}
+
+	run := []string{"network", "create", "--label", createdByVcsim}
+	for i := range labels {
+		run = append(run, "--label", labels[i])
+	}
+	run = append(run, bridgeName)
+
+	cmd = exec.Command("docker", run...)
+	out, err = cmd.Output()
+	if err != nil {
+		log.Printf("vcsim %s: %s", cmd.Args, err)
+		return "", err
+	}
+
+	// the ID returned by "docker network ls" is only 12 characters, so normalize to that
+	id := string(out[0:12])
+	log.Printf("vcsim %s: id=%s", cmd.Args, id)
+
+	return id, nil
+}
+
 // create
 //   - name - pretty name, eg. vm name
 //   - id - uuid or similar - this is merged into container name rather than dictating containerID
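Aside (illustrative, not part of the patch): prefixToMask converts a CIDR prefix length into the dotted-quad string form used by HostIpConfig.SubnetMask. A minimal table-driven sketch of the expected behaviour, assuming IPv4 (32-bit) masks:

	func TestPrefixToMask(t *testing.T) {
		cases := map[int]string{
			8:  "255.0.0.0",
			16: "255.255.0.0",
			24: "255.255.255.0",
			28: "255.255.255.240",
		}
		for prefix, want := range cases {
			if got := prefixToMask(prefix); got != want {
				t.Errorf("prefixToMask(%d) = %s, want %s", prefix, got, want)
			}
		}
	}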
diff --git a/simulator/container_host_system.go b/simulator/container_host_system.go
index cf7ba2cf2..1c061a01b 100644
--- a/simulator/container_host_system.go
+++ b/simulator/container_host_system.go
@@ -46,46 +46,25 @@ type simHost struct {
 	c    *container
 }
 
-// createSimulationHost inspects the provided HostSystem and creates a simHost binding for it if
-// the vm.Config.ExtraConfig set contains a key "RUN.container".
-// If the ExtraConfig set does not contain that key, this returns nil.
-// Methods on the simHost type are written to check for nil object so the return from this call can be blindly
-// assigned and invoked without the caller caring about whether a binding for a backing container was warranted.
-func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) {
-	sh := &simHost{
-		host: host,
-	}
-
-	advOpts := ctx.Map.Get(host.ConfigManager.AdvancedOption.Reference()).(*OptionManager)
-	fault := advOpts.QueryOptions(&types.QueryOptions{Name: "RUN.container"}).(*methods.QueryOptionsBody).Fault()
-	if fault != nil {
-		if _, ok := fault.VimFault().(*types.InvalidName); ok {
-			return nil, nil
-		}
-		return nil, fmt.Errorf("errror retrieving container backing from host config manager: %+v", fault.VimFault())
-	}
-
-	// assemble env
-	var dockerEnv []string
+// createSimHostMounts iterates over the provided filesystem mount info, creating docker volumes. It does _not_ delete volumes
+// already created if creation of one fails.
+// Returns:
+//
+//	volume mounts: mount options suitable to pass directly to docker
+//	exec commands: a set of commands to run in the sim host after creation
+//	error: if construction of the above outputs fails
+func createSimHostMounts(ctx *Context, containerName string, mounts []types.HostFileSystemMountInfo) ([]string, [][]string, error) {
 	var dockerVol []string
-	var dockerNet []string
 	var symlinkCmds [][]string
-	var err error
 
-	hName := host.Summary.Config.Name
-	hUuid := host.Summary.Hardware.Uuid
-	containerName := constructContainerName(hName, hUuid)
-
-	for i := range host.Config.FileSystemVolume.MountInfo {
-		info := &host.Config.FileSystemVolume.MountInfo[i]
+	for i := range mounts {
+		info := &mounts[i]
 		name := info.Volume.GetHostFileSystemVolume().Name
 
 		// NOTE: if we ever need persistence cross-invocation we can look at encoding the disk info as a label
 		labels := []string{"name=" + name, "container=" + containerName, deleteWithContainer}
 		dockerUuid, err := createVolume("", labels, nil)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 
 		uuid := volumeIDtoHostVolumeUUID(dockerUuid)
@@ -130,6 +109,10 @@ func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) {
 		}
 
 		dockerVol = append(dockerVol, fmt.Sprintf("%s:/vmfs/volumes/%s:%s", dockerUuid, uuid, opt))
+
+		// create symlinks from /vmfs/volumes/ for the Volume Name - the direct mount (path) is only the uuid
+		// ? can we do this via a script in the ESX image instead of via exec?
+		// ? are the volume names exposed in any manner inside the host? They must be because these mounts exist, but where does that come from? Chicken and egg problem? ConfigStore?
 		symlinkCmds = append(symlinkCmds, []string{"ln", "-s", fmt.Sprintf("/vmfs/volumes/%s", uuid), fmt.Sprintf("/vmfs/volumes/%s", name)})
 		if strings.HasPrefix(name, "OSDATA") {
 			symlinkCmds = append(symlinkCmds, []string{"mkdir", "-p", "/var/lib/vmware"})
@@ -137,54 +120,145 @@ func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) {
 		}
 	}
 
+	return dockerVol, symlinkCmds, nil
+}
+
+// createSimHostNetworks creates the networks for the host if not already created. Because we expect multiple hosts on the same network to act as a cluster,
+// it's likely that only the first host will create networks.
+// This includes:
+// * bridge network per-pNIC
+// * bridge network per-DVS
+//
+// Returns:
+// * array of networks to attach to
+// * array of commands to run
+// * error
+func createSimHostNetworks(ctx *Context, containerName string, networkInfo *types.HostNetworkInfo, advOpts *OptionManager) ([]string, [][]string, error) {
+	var dockerNet []string
+	var cmds [][]string
+
+	existingNets := make(map[string]string)
+
 	// a pnic does not have an IP so this is purely a connectivity statement, not a network identity, however this is not how docker works
 	// so we're going to end up with a veth (our pnic) that does have an IP assigned.
-	// For now we're going to simply ignore that IP. //TODO: figure out whether we _need_ to do something with it.
-	for i := range host.Config.Network.Pnic {
-		pnicName := host.Config.Network.Pnic[i].Device
+	// For now we're going to simply ignore that IP. //TODO: figure out whether we _need_ to do something with it at this point
+	for i := range networkInfo.Pnic {
+		pnicName := networkInfo.Pnic[i].Device
+
+		bridge := getPnicUnderlay(advOpts, pnicName)
 
-		queryRes := advOpts.QueryOptions(&types.QueryOptions{Name: advOptPrefixPnicToUnderlayPrefix + pnicName}).(*methods.QueryOptionsBody).Res
-		bridge := queryRes.Returnval[0].GetOptionValue().Value.(string)
+		if pnic, attached := existingNets[bridge]; attached {
+			return nil, nil, fmt.Errorf("cannot attach multiple pNICs to the same underlay: %s and %s both attempting to connect to %s for %s", pnic, pnicName, bridge, containerName)
+		}
+
+		_, err := createBridge(bridge)
+		if err != nil {
+			return nil, nil, err
+		}
 
 		dockerNet = append(dockerNet, bridge)
+		existingNets[bridge] = pnicName
 	}
 
-	// determine the management
-	// TODO: add in vSwitches if we know them at this point
-	mgmtSwitch := ""
-	vmNet := ""
-	for _, vswitch := range host.Config.Network.Vswitch {
-		vmnic := vswitch.Spec.Policy.NicTeaming.NicOrder.ActiveNic[0]
-		switchName := vswitch.Name
-
-		for _, pg := range vswitch.Portgroup {
-		}
-	}
-
+	return dockerNet, cmds, nil
+}
+
+func getPnicUnderlay(advOpts *OptionManager, pnicName string) string {
+	queryRes := advOpts.QueryOptions(&types.QueryOptions{Name: advOptPrefixPnicToUnderlayPrefix + pnicName}).(*methods.QueryOptionsBody).Res
+	return queryRes.Returnval[0].GetOptionValue().Value.(string)
+}
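For orientation (illustrative, not part of the patch): getPnicUnderlay resolves a per-pNIC advanced option to the name of the docker bridge to use as that pNIC's underlay. The literal value of advOptPrefixPnicToUnderlayPrefix is not shown in this patch; assuming it expands to a key of the form "RUN.underlay.<pnic>", the mapping consumed here would be seeded as an OptionValue along these lines:

	// hypothetical example - the prefixed key literal is an assumption
	underlay := &types.OptionValue{
		Key:   advOptPrefixPnicToUnderlayPrefix + "vmnic0", // eg. "RUN.underlay.vmnic0"
		Value: "vcsim-mgmt-underlay",                       // bridge name handed to createBridge
	}

configureContainerBacking in host_system.go (later in this patch) is presumably where such options get recorded against the host's OptionManager.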
+// createSimulationHost creates a simHost binding if the host.ConfigManager.AdvancedOption set contains a key "RUN.container".
+// If the set does not contain that key, this returns nil.
+// Methods on the simHost type are written to check for nil object so the return from this call can be blindly
+// assigned and invoked without the caller caring about whether a binding for a backing container was warranted.
+//
+// The created simHost is based on the details of the supplied host system.
+// VMFS locations are created based on FileSystemMountInfo
+// Bridge networks are created to simulate underlay networks - one per pNIC. You cannot connect two pNICs to the same underlay.
+//
+// On Network connectivity - initially this is using docker network constructs. This means we cannot easily use nested "ip netns" so we cannot
+// have a perfect representation of the ESX structure: pnic(veth)->vswitch(bridge)->{vmk,vnic}(veth)
+// Instead we have the following:
+// * bridge network per underlay - everything connects directly to the underlay
+// * VMs/CRXs connect to the underlay dictated by the Uplink pNIC attached to their vSwitch
+// * hostd vmknic gets the "host" container IP - we don't currently support multiple vmknics with different IPs
+// * no support for mocking VLANs
+func createSimulationHost(ctx *Context, host *HostSystem) (*simHost, error) {
+	sh := &simHost{
+		host: host,
+	}
+
+	advOpts := ctx.Map.Get(host.ConfigManager.AdvancedOption.Reference()).(*OptionManager)
+	fault := advOpts.QueryOptions(&types.QueryOptions{Name: "RUN.container"}).(*methods.QueryOptionsBody).Fault()
+	if fault != nil {
+		if _, ok := fault.VimFault().(*types.InvalidName); ok {
+			return nil, nil
+		}
+		return nil, fmt.Errorf("error retrieving container backing from host config manager: %+v", fault.VimFault())
+	}
+
+	// assemble env
+	var dockerEnv []string
+	var execCmds [][]string
+
+	var err error
+
+	hName := host.Summary.Config.Name
+	hUuid := host.Summary.Hardware.Uuid
+	containerName := constructContainerName(hName, hUuid)
+
+	// create volumes and mounts
+	dockerVol, volCmds, err := createSimHostMounts(ctx, containerName, host.Config.FileSystemVolume.MountInfo)
+	if err != nil {
+		return nil, err
+	}
+	execCmds = append(execCmds, volCmds...)
 
-	// if there's a DVS that doesn't have a bridge, create the bridge
+	// create networks
+	dockerNet, netCmds, err := createSimHostNetworks(ctx, containerName, host.Config.Network, advOpts)
+	if err != nil {
+		return nil, err
+	}
+	execCmds = append(execCmds, netCmds...)
 
+	// create the container
 	sh.c, err = create(ctx, hName, hUuid, dockerNet, dockerVol, nil, dockerEnv, "alpine", []string{"sleep", "infinity"})
 	if err != nil {
 		return nil, err
 	}
 
+	// start the container
 	err = sh.c.start(ctx)
 	if err != nil {
 		return nil, err
 	}
 
-	// create symlinks from /vmfs/volumes/ for the Volume Name - the direct mount (path) is only the uuid
-	// ? can we do this via a script in the ESX image? are the volume names exposed in any manner instead the host? They must be because these mounts exist
-	// but where does that come from? Chicken and egg problem? ConfigStore?
-	for _, symlink := range symlinkCmds {
-		_, err := sh.c.exec(ctx, symlink)
+	// run post-creation steps
+	for _, cmd := range execCmds {
+		_, err := sh.c.exec(ctx, cmd)
 		if err != nil {
 			return nil, err
 		}
 	}
 
+	_, detail, err := sh.c.inspect()
+	if err != nil {
+		return nil, err
+	}
+
+	for i := range host.Config.Network.Pnic {
+		pnic := &host.Config.Network.Pnic[i]
+		bridge := getPnicUnderlay(advOpts, pnic.Device)
+		settings := detail.NetworkSettings.Networks[bridge]
+
+		// it doesn't really make sense at an ESX level to set this information as IP bindings are associated with
+		// vnics (VMs) or vmknics (daemons such as hostd).
+		// However it's a useful location to stash this info in a manner that can be retrieved at a later date.
+		pnic.Spec.Ip.IpAddress = settings.IPAddress
+		pnic.Spec.Ip.SubnetMask = prefixToMask(settings.IPPrefixLen)
+
+		pnic.Mac = settings.MacAddress
+	}
+
 	// TODO iterate over the following to update the IPs and MACs:
 	// 1. host.Config.Network.Pnic
 	// 2. host.Config.Network.Vnic
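Worth noting (sketch, not part of the patch): because createSimulationHost stashes the veth addressing on the pNIC entries, test code can recover the container's underlay IP through ordinary host config. A hypothetical helper to illustrate:

	// underlayIP returns the IP stashed on a simulated pNIC by createSimulationHost.
	// Hypothetical helper, for illustration only.
	func underlayIP(host *HostSystem, device string) string {
		for i := range host.Config.Network.Pnic {
			pnic := &host.Config.Network.Pnic[i]
			if pnic.Device == device && pnic.Spec.Ip != nil {
				return pnic.Spec.Ip.IpAddress
			}
		}
		return ""
	}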
diff --git a/simulator/container_host_system_test.go b/simulator/container_host_system_test.go
index 86c293f7e..e69e95710 100644
--- a/simulator/container_host_system_test.go
+++ b/simulator/container_host_system_test.go
@@ -172,7 +172,7 @@ func TestHostContainerBacking(t *testing.T) {
 	ctx := SpoofContext()
 
 	hs := NewHostSystem(esx.HostSystem)
-	hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes)
+	hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes, "vcsim-mgmt-underlay")
 
 	hs.configure(ctx, types.HostConnectSpec{}, true)
 
diff --git a/simulator/host_system.go b/simulator/host_system.go
index 1d33807b9..69a9518a4 100644
--- a/simulator/host_system.go
+++ b/simulator/host_system.go
@@ -141,7 +141,12 @@ func (h *HostSystem) configure(ctx *Context, spec types.HostConnectSpec, connect
 // image - the container image with which to simulate the host
 // mounts - array of mount info that should be translated into /vmfs/volumes/... mounts backed by container volumes
 // networks - names of bridges to use for underlays. Will create a pNIC for each. The first will be treated as the management network.
-func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mounts []types.HostFileSystemMountInfo, networks []string) {
+//
+// Restrictions adopted from createSimulationHost:
+// * no mock of VLAN connectivity
+// * only a single vmknic, used for "the management IP"
+// * pNIC connectivity does not directly impact VMs/vmks using it as uplink
+func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mounts []types.HostFileSystemMountInfo, networks ...string) error {
 	option := &types.OptionValue{
 		Key:   advOptContainerBackingImage,
 		Value: image,
 	}
@@ -169,6 +174,22 @@ func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mount
 	// purge pNICs from the template - it makes no sense to keep them for a sim host
 	h.Config.Network.Pnic = make([]types.PhysicalNic, len(networks))
 
+	// purge any IPs and MACs associated with existing NetConfigs for the host
+	for cfgIdx := range h.Config.VirtualNicManagerInfo.NetConfig {
+		config := &h.Config.VirtualNicManagerInfo.NetConfig[cfgIdx]
+		for candidateIdx := range config.CandidateVnic {
+			candidate := &config.CandidateVnic[candidateIdx]
+			candidate.Spec.Ip.IpAddress = "0.0.0.0"
+			candidate.Spec.Ip.SubnetMask = "0.0.0.0"
+			candidate.Spec.Mac = "00:00:00:00:00:00"
+		}
+	}
+
+	// The presence of a pNIC is used to indicate connectivity to a specific underlay. We construct an empty pNIC entry and specify the underlay via
+	// host.ConfigManager.AdvancedOptions. The pNIC will be populated with the MAC (accurate) and IP (divergence - we need to stash it somewhere) for the veth.
+	// We create a NetConfig "management" entry for the first pNIC - this will be populated with the IP of the "host" container.
+
+	// create a pNIC for each underlay
 	for i, net := range networks {
 		name := fmt.Sprintf("vmnic%d", i)
 
@@ -226,135 +247,117 @@ func (h *HostSystem) configureContainerBacking(ctx *Context, image string, mount
 			RdmaDevice: "",
 			DpuId:      "",
 		}
+	}
+
+	// TODO: construct management bindings - should we do this using public APIs now that host has pNICs? Or do we need
+	// to have a baseline set of entries first?
+
+	// sanity check that everything's hung together sufficiently well
+	_, err := getNetConfigInterface(ctx, h, "management")
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
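As exercised by the updated test above, the call site now reads as follows (the second bridge name is hypothetical, added only to show the variadic form):

	hs := NewHostSystem(esx.HostSystem)
	// the first bridge becomes the management underlay; the returned error surfaces config problems
	err := hs.configureContainerBacking(ctx, "alpine", defaultSimVolumes, "vcsim-mgmt-underlay", "vcsim-data-underlay")
	if err != nil {
		t.Fatal(err)
	}
	hs.configure(ctx, types.HostConnectSpec{}, true)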
+
+// netConfigDetails is used to package up all the related network entities associated with a NetConfig binding
+type netConfigDetails struct {
+	nicType   string
+	netconfig *types.VirtualNicManagerNetConfig
+	vmk       *types.HostVirtualNic
+	netstack  *types.HostNetStackInstance
+	portgroup *types.HostPortGroup
+	vswitch   *types.HostVirtualSwitch
+	uplink    *types.PhysicalNic
+}
+
+// getNetConfigInterface returns the set of constructs active for a given nicType (eg. "management", "vmotion")
+// This method is provided because the Config structure held by HostSystem is heavily interconnected but serialized and not cross-linked with pointers.
+// As such there's a _lot_ of cross-referencing that needs to be done to navigate.
+// The pNIC returned is the uplink associated with the vSwitch for the netconfig.
+func getNetConfigInterface(ctx *Context, host *HostSystem, nicType string) (*netConfigDetails, error) {
+	details := &netConfigDetails{
+		nicType: nicType,
+	}
+
+	for i := range host.Config.VirtualNicManagerInfo.NetConfig {
+		if host.Config.VirtualNicManagerInfo.NetConfig[i].NicType == nicType {
+			details.netconfig = &host.Config.VirtualNicManagerInfo.NetConfig[i]
+			break
+		}
+	}
+	if details.netconfig == nil {
+		return nil, fmt.Errorf("no matching NetConfig for NicType=%s", nicType)
+	}
+
+	if details.netconfig.SelectedVnic == nil {
+		return details, nil
+	}
+
-	// there's too much repetition of state in host.Config.Network for it to be viable to use this directly
-	// as the config reference - it's a serialization of an interlinked data structure but without references.
-	// Outcomes:
-	// bridge - created for all vSwitch (host prefixed) and dvSwitch (not host scoped)
-	// host connection to bridge - if vNIC has port of type "host", connect to vSwitch/dvSwitch bridge
-	// TODO: validate this interpretation of "host" as port type.
-	// host is connected to mgmt network at minimum and "management" netConfig uses vNIC connected to mgmt net
-	// vNICs have IPs and MACs that correspond to the veths in the host container
-	// ? pNIC per vSwitch with uplink - this would be purely for data consistency in returned info, not for sim connectivity
-
-	// Approach:
-	// Implement API methods for creating switches and vmknics. If we do this then it's the same path for the API live as used
-	// to process the template during host creation.
-	// 1. implement AddvSwitch method, and invoke it for each vswitch in the template - this creates bridge
-	// 2. implement AddVmkNic method, and invoke it for each in the template - this calls "connect"
-
-	// vNIC connectivity is dictated within host by:
-	// In host:
-	// * on the same portgroup
-	// * on different portgroups with the same VLANID, on the same vSwitch
-	// Across hosts:
-	// * route to shared underlay, same VLANID
-	// ? delta is "does the vSwitch have an uplink?"
-	// ! cannot reconnect all containers to a different bridge if uplink is added as that will cause re-IP.
-	// > could mess around with ip tables rules to BLOCK comms and have all vNICs on same VLAN connect to the same bridge, but orchestration pain.
-	// ? have pnic as bridge, hosts connected bridges for pnic and vswitch. pnic added as uplink constructs iptables forwarding rules from vswitch bridge?
-
-
-	// !!!! uncertain of approach!! Draw it out in Miro.
-
-	// 2023-04-28
-	// * Create a bridge network per underlay (treat isolated logical networks as a separate underlay).
-	// * connect VMs/CRXs directly to the underlay bridge. Decide which underlays to connect to based on vNIC->vSwitch->Uplink.
-	// * Ignore VLAN for now.
-	// * Hosts connect to mgmt network - don't need to do anything else.
-	// * need a way to allow infravisor tests to connect hosts to an "overlay" network
-
-	// underlay - vswitch - vlanID
-
-	// first underlay is the management network
-	if i == 0 {
-		// 1. determine which vmk has the management service enabled
-		// 2. determine which vSwitch the mgmt vmk is connected to
-		// 3. ensure that vswitch has the mgmt vmnic as its uplink
-		mgmtVmkNic := ""
-
-		// purge the IPs and MACs that are going to be backed by veths as the info will be incorrect
-		for cfgIdx := range h.Config.Network.VirtualNicManagerInfo.NetConfig {
-			config := &h.Config.Network.VirtualNicManagerInfo.NetConfig[cfgIdx]
-			for candidateIdx := range config.CandidateVnic {
-				candidate := &config.CandidateVnic[candidateIdx]
-				candidate.Spec.IP.IpAddress = "0.0.0.0"
-				candidate.Spec.IP.SubnetMask = "0.0.0.0"
-				candidate.Spec.Mac = "00:00:00:00:00:00"
-
-				if config.NicType == "management" && candidate.Key == config.SelectedVnic[0] {
-					mgmtVmkNic = candidate.Device
-				}
-			}
-		}
-
-		if mgmtVmkNic == "" {
-			panic("expected ESX to have a management vmknic")
-		}
-
-		for vnicIdx := range h.Config.Network.VirtualNicManagerInfo.Vnic {
-			vnic := &h.Config.Network.VirtualNicManagerInfo.Vnic[vnicIdx]
-			vnic.Spec.IP.IpPAddress = "0.0.0.0"
-			vnic.Spec.IP.SubnetMask = "0.0.0.0"
-			vnic.Spec.Mac = "00:00:00:00:00:00"
-
-			if config.NicType == "management" && candidate.Key == config.SelectedVnic[0] {
-				mgmtVmkNic = candidate.Device
-			}
-		}
-	}
-
-
-	VirtualNicManagerInfo: &types.HostVirtualNicManagerInfo{
-		NetConfig: []types.VirtualNicManagerNetConfig{
-			{
-				NicType:            "management",
-				MultiSelectAllowed: true,
-				CandidateVnic: []types.HostVirtualNic{
-					{
-						Device:    mgmtVmkNic,
-						Key:       "management.key-vim.host.VirtualNic-vmk1",
-						Portgroup: "Management Network",
-						Spec: types.HostVirtualNicSpec{
-							Ip: &types.HostIpConfig{
-								Dhcp:       false,
-								IpAddress:  "172.27.27.2",
-								SubnetMask: "255.255.0.0",
-								IpV6Config: &types.HostIpConfigIpV6AddressConfiguration{
-									IpV6Address: []types.HostIpConfigIpV6Address{
-										{
-											IpAddress:    "fe80::250:56ff:fe65:d28a",
-											PrefixLength: 64,
-											Origin:       "other",
-											DadState:     "preferred",
-											Lifetime:     (*time.Time)(nil),
-											Operation:    "",
-										},
-									},
-									AutoConfigurationEnabled: types.NewBool(false),
-									DhcpV6Enabled:            types.NewBool(false),
-								},
-							},
-							Mac:                    "00:50:56:65:d2:8a",
-							DistributedVirtualPort: (*types.DistributedVirtualSwitchPortConnection)(nil),
-							Portgroup:              "Management Network",
-							Mtu:                    1500,
-							TsoEnabled:             types.NewBool(true),
-							NetStackInstanceKey:    "defaultTcpipStack",
-							OpaqueNetwork:          (*types.HostVirtualNicOpaqueNetworkSpec)(nil),
-							ExternalId:             "",
-							PinnedPnic:             "",
-							IpRouteSpec:            (*types.HostVirtualNicIpRouteSpec)(nil),
-							SystemOwned:            types.NewBool(false),
-							DpuId:                  "",
-						},
-						Port: "",
-					},
-				},
-				SelectedVnic: []string{"management.key-vim.host.VirtualNic-vmk1"},
-			},
-		},
-	}
 
+	vnicKey := details.netconfig.SelectedVnic[0]
+	for i := range details.netconfig.CandidateVnic {
+		if details.netconfig.CandidateVnic[i].Key == vnicKey {
+			details.vmk = &details.netconfig.CandidateVnic[i]
+			break
+		}
+	}
+	if details.vmk == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent vNIC key %s for %s nicType", host.Name, vnicKey, nicType))
+	}
+
+	portgroupName := details.vmk.Portgroup
+	netstackKey := details.vmk.Spec.NetStackInstanceKey
+
+	for i := range host.Config.Network.NetStackInstance {
+		if host.Config.Network.NetStackInstance[i].Key == netstackKey {
+			details.netstack = &host.Config.Network.NetStackInstance[i]
+			break
+		}
+	}
+	if details.netstack == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent NetStack key %s for %s nicType", host.Name, netstackKey, nicType))
+	}
+
+	for i := range host.Config.Network.Portgroup {
+		// TODO: confirm correctness of this - seems weird it references the Spec.Name instead of the key like everything else.
+		if host.Config.Network.Portgroup[i].Spec.Name == portgroupName {
+			details.portgroup = &host.Config.Network.Portgroup[i]
+			break
+		}
+	}
+	if details.portgroup == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent PortGroup name %s for %s nicType", host.Name, portgroupName, nicType))
+	}
+
+	vswitchKey := details.portgroup.Vswitch
+	for i := range host.Config.Network.Vswitch {
+		if host.Config.Network.Vswitch[i].Key == vswitchKey {
+			details.vswitch = &host.Config.Network.Vswitch[i]
+			break
+		}
+	}
+	if details.vswitch == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent vSwitch key %s for %s nicType", host.Name, vswitchKey, nicType))
+	}
+
+	if len(details.vswitch.Pnic) != 1 {
+		// to change this, look at the Active NIC in the NicTeamingPolicy, but for now not worth it
+		panic(fmt.Sprintf("vSwitch %s for host %s has %d pNICs associated; exactly one uplink is supported", vswitchKey, host.Name, len(details.vswitch.Pnic)))
+	}
+
+	pnicKey := details.vswitch.Pnic[0]
+	for i := range host.Config.Network.Pnic {
+		if host.Config.Network.Pnic[i].Key == pnicKey {
+			details.uplink = &host.Config.Network.Pnic[i]
+			break
+		}
+	}
+	if details.uplink == nil {
+		panic(fmt.Sprintf("NetConfig for host %s references non-existent pNIC key %s for %s nicType", host.Name, pnicKey, nicType))
+	}
+
+	return details, nil
+}
 
 func (h *HostSystem) event() types.HostEvent {
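Usage sketch (illustrative, not part of the patch): once configureContainerBacking and createSimulationHost have run, the cross-referenced entities for a nicType resolve in one call. Note that details.uplink is only populated when the NetConfig has a SelectedVnic:

	details, err := getNetConfigInterface(ctx, hs, "management")
	if err != nil {
		return err
	}
	if details.uplink != nil {
		// the container IP/mask stashed on the uplink pNIC by createSimulationHost
		log.Printf("mgmt vmk %s via %s: %s/%s",
			details.vmk.Device, details.uplink.Device,
			details.uplink.Spec.Ip.IpAddress, details.uplink.Spec.Ip.SubnetMask)
	}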