Commit
Trigger additional DNS probes on peer connection status changes
hurricanehrndz committed Aug 16, 2024
1 parent 6016d2f commit 7b40359
Showing 4 changed files with 178 additions and 73 deletions.
5 changes: 3 additions & 2 deletions client/internal/dns/host.go
@@ -78,7 +78,7 @@ func newNoopHostMocker() hostManager {
}
}

func dnsConfigToHostDNSConfig(dnsConfig nbdns.Config, ip string, port int) HostDNSConfig {
func dnsConfigToHostDNSConfig(dnsConfig nbdns.Config, ip string, port int, connectedPeers int) HostDNSConfig {
config := HostDNSConfig{
RouteAll: false,
ServerIP: ip,
@@ -88,13 +88,14 @@ func dnsConfigToHostDNSConfig(dnsConfig nbdns.Config, ip string, port int) HostD
if len(nsConfig.NameServers) == 0 {
continue
}
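// Route all traffic through the primary nameserver only while at least one
// peer is connected; with zero peers each match domain is emitted as disabled.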
if nsConfig.Primary {
if nsConfig.Primary && connectedPeers != 0 {
config.RouteAll = true
}

for _, domain := range nsConfig.Domains {
config.Domains = append(config.Domains, DomainConfig{
Domain: strings.TrimSuffix(domain, "."),
Disabled: connectedPeers == 0,
MatchOnly: !nsConfig.SearchDomainsEnabled,
})
}
35 changes: 17 additions & 18 deletions client/internal/dns/server.go
@@ -5,6 +5,7 @@ import (
"fmt"
"net/netip"
"runtime"
"slices"
"strings"
"sync"

@@ -116,7 +117,7 @@ func NewDefaultServerPermanentUpstream(
ds.hostsDNSHolder.set(hostsDnsList)
ds.permanent = true
ds.addHostRootZone()
ds.currentConfig = dnsConfigToHostDNSConfig(config, ds.service.RuntimeIP(), ds.service.RuntimePort())
ds.currentConfig = dnsConfigToHostDNSConfig(config, ds.service.RuntimeIP(), ds.service.RuntimePort(), 1)
ds.searchDomainNotifier = newNotifier(ds.SearchDomains())
ds.searchDomainNotifier.setListener(listener)
setServerDns(ds)
@@ -305,11 +306,18 @@ func (s *DefaultServer) applyConfiguration(update nbdns.Config) error {
if err != nil {
return fmt.Errorf("not applying dns update, error: %v", err)
}
muxUpdates := append(localMuxUpdates, upstreamMuxUpdates...) //nolint:gocritic

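// With zero connected peers, register only the local handlers and skip the upstream ones.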
var muxUpdates []muxUpdate
if s.statusRecorder.GetConnectedPeersCount() == 0 {
log.Infof("0 connected peers, not registering upstream handlers")
muxUpdates = localMuxUpdates
} else {
muxUpdates = append(localMuxUpdates, upstreamMuxUpdates...) //nolint:gocritic
}

s.updateMux(muxUpdates)
s.updateLocalResolver(localRecords)
s.currentConfig = dnsConfigToHostDNSConfig(update, s.service.RuntimeIP(), s.service.RuntimePort())
s.currentConfig = dnsConfigToHostDNSConfig(update, s.service.RuntimeIP(), s.service.RuntimePort(), s.statusRecorder.GetConnectedPeersCount())

hostUpdate := s.currentConfig
if s.service.RuntimePort() != defaultPort && !s.hostManager.supportCustomPort() {
@@ -359,8 +367,8 @@ func (s *DefaultServer) buildLocalHandlerUpdate(customZones []nbdns.CustomZone)
}

func (s *DefaultServer) buildUpstreamHandlerUpdate(nameServerGroups []*nbdns.NameServerGroup) ([]muxUpdate, error) {

var muxUpdates []muxUpdate
log.Infof("number of nameserver groups: %d", len(nameServerGroups))
for _, nsGroup := range nameServerGroups {
if len(nsGroup.NameServers) == 0 {
log.Warn("received a nameserver group with empty nameserver list")
@@ -495,29 +503,22 @@ func (s *DefaultServer) upstreamCallbacks(
nsGroup *nbdns.NameServerGroup,
handler dns.Handler,
) (deactivate func(error), reactivate func()) {
var removeIndex map[string]int
deactivate = func(err error) {
s.mux.Lock()
defer s.mux.Unlock()

l := log.WithField("nameservers", nsGroup.NameServers)
l.Info("Temporarily deactivating nameservers group due to timeout")

removeIndex = make(map[string]int)
for _, domain := range nsGroup.Domains {
removeIndex[domain] = -1
}
if nsGroup.Primary {
removeIndex[nbdns.RootZone] = -1
s.currentConfig.RouteAll = false
s.service.DeregisterMux(nbdns.RootZone)
}

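// Disable and deregister every configured domain served by this nameserver group.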
for i, item := range s.currentConfig.Domains {
if _, found := removeIndex[item.Domain]; found {
if slices.Contains(nsGroup.Domains, item.Domain) {
s.currentConfig.Domains[i].Disabled = true
s.service.DeregisterMux(item.Domain)
removeIndex[item.Domain] = i
}
}

@@ -530,18 +531,16 @@ }
}

s.updateNSState(nsGroup, err, false)

}
reactivate = func() {
s.mux.Lock()
defer s.mux.Unlock()

for domain, i := range removeIndex {
if i == -1 || i >= len(s.currentConfig.Domains) || s.currentConfig.Domains[i].Domain != domain {
continue
for i, item := range s.currentConfig.Domains {
if slices.Contains(nsGroup.Domains, item.Domain) {
s.currentConfig.Domains[i].Disabled = false
s.service.RegisterMux(item.Domain, handler)
}
s.currentConfig.Domains[i].Disabled = false
s.service.RegisterMux(domain, handler)
}

l := log.WithField("nameservers", nsGroup.NameServers)
139 changes: 105 additions & 34 deletions client/internal/dns/upstream.go
@@ -56,14 +56,101 @@ type upstreamResolverBase struct
func newUpstreamResolverBase(ctx context.Context, statusRecorder *peer.Status) *upstreamResolverBase {
ctx, cancel := context.WithCancel(ctx)

return &upstreamResolverBase{
resolverBase := &upstreamResolverBase{
ctx: ctx,
cancel: cancel,
upstreamTimeout: upstreamTimeout,
reactivatePeriod: reactivatePeriod,
failsTillDeact: failsTillDeact,
statusRecorder: statusRecorder,
}

go resolverBase.watchPeersConnStatusChanges()

return resolverBase
}

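// watchPeersConnStatusChanges reacts to peer connection status notifications:
// it runs a single probe when no peers remain connected and a backoff-driven
// probe loop while peers are connected, enabling or disabling the upstream
// resolver based on the probe results.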
func (u *upstreamResolverBase) watchPeersConnStatusChanges() {
var probeRunning atomic.Bool
var cancelBackOff context.CancelFunc

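// Backoff schedule for the probe loop: start at 200ms, back off slowly
// (multiplier 1.1) up to 5s between attempts, and stop after 15s in total.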
exponentialBackOff := &backoff.ExponentialBackOff{
InitialInterval: 200 * time.Millisecond,
RandomizationFactor: 0.5,
Multiplier: 1.1,
MaxInterval: 5 * time.Second,
MaxElapsedTime: 15 * time.Second,
Stop: backoff.Stop,
Clock: backoff.SystemClock,
}
operation := func() error {
select {
case <-u.ctx.Done():
return backoff.Permanent(fmt.Errorf("exiting upstream retry loop for upstreams %s: parent context: %s", u.upstreamServers, u.ctx.Err()))
default:
}

u.probeAvailability()
if u.disabled {
return fmt.Errorf("probe failed")
}
return nil
}

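// continualProbe runs on every peer connection status change. With no
// connected peers it cancels any in-flight backoff loop and fires one final
// probe; otherwise it (re)starts the probe loop defined above.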
continualProbe := func() {
// probe repeatedly (bounded by the backoff's MaxElapsedTime) while at least one peer is connected
if u.statusRecorder.GetConnectedPeersCount() == 0 {
log.Debug("0 peers connected, running one more DNS probe")
// cancel backoff operation
if cancelBackOff != nil {
cancelBackOff()
cancelBackOff = nil
}
u.probeAvailability()
return
}

if probeRunning.Load() {
log.Info("restarting DNS probing")
cancelBackOff()
cancelBackOff = nil
}
defer func() {
u.mutex.Lock()
log.Infof("DNS probing finished, servers %s disabled: %t", u.upstreamServers, u.disabled)
u.mutex.Unlock()
probeRunning.Store(false)
}()
probeRunning.Store(true)

ctx, cancel := context.WithCancel(context.Background())
cancelBackOff = cancel
err := backoff.Retry(func() error {
select {
case <-ctx.Done():
log.Warn("DNS probing cancelled")
return backoff.Permanent(ctx.Err())
default:
return operation()
}
}, backoff.WithContext(exponentialBackOff, ctx))
cancelBackOff = nil
if err != nil {
log.Warn("DNS probe triggered by peer connection failed")
u.disable(err)
return
}
}

for {
select {
case <-u.ctx.Done():
return
case <-u.statusRecorder.GetPeersConnStatusChangeNotifier():
log.Debugf("peer connection status changed, probing DNS availability")
go continualProbe()
}
}
}

func (u *upstreamResolverBase) stop() {
@@ -163,7 +250,7 @@ func (u *upstreamResolverBase) checkUpstreamFails(err error) {
}

// probeAvailability tests all upstream servers simultaneously and
// disables the resolver if none work
// disables or re-enables the resolver
func (u *upstreamResolverBase) probeAvailability() {
u.mutex.Lock()
defer u.mutex.Unlock()
@@ -174,11 +261,6 @@
default:
}

// avoid probe if upstreams could resolve at least one query and fails count is less than failsTillDeact
if u.successCount.Load() > 0 && u.failsCount.Load() < u.failsTillDeact {
return
}

var success bool
var mu sync.Mutex
var wg sync.WaitGroup
@@ -190,7 +272,7 @@
wg.Add(1)
go func() {
defer wg.Done()
err := u.testNameserver(upstream, 500*time.Millisecond)
err := u.testNameserver(upstream, probeTimeout)
if err != nil {
errors = multierror.Append(errors, err)
log.Warnf("probing upstream nameserver %s: %s", upstream, err)
@@ -208,6 +290,15 @@
// didn't find a working upstream server, let's disable and try later
if !success {
u.disable(errors.ErrorOrNil())
return
}

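// A probe succeeded while the resolver was disabled: reset the failure
// counter and reactivate the upstream handlers.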
if u.disabled {
log.Infof("upstreams %s are responsive again. Adding them back to system", u.upstreamServers)
u.failsCount.Store(0)
u.successCount.Add(1)
u.reactivate()
u.disabled = false
}
}

@@ -223,37 +314,17 @@ func (u *upstreamResolverBase) waitUntilResponse() {
Clock: backoff.SystemClock,
}

operation := func() error {
select {
case <-u.ctx.Done():
return backoff.Permanent(fmt.Errorf("exiting upstream retry loop for upstreams %s: parent context has been canceled", u.upstreamServers))
default:
}

for _, upstream := range u.upstreamServers {
if err := u.testNameserver(upstream, probeTimeout); err != nil {
log.Tracef("upstream check for %s: %s", upstream, err)
} else {
// at least one upstream server is available, stop probing
return nil
}
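// Delegate to probeAvailability, which reactivates the resolver itself once an upstream answers.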
err := backoff.Retry(func() error {
u.probeAvailability()
if u.disabled {
return fmt.Errorf("failed to enable upstream")
}

log.Tracef("checking connectivity with upstreams %s failed. Retrying in %s", u.upstreamServers, exponentialBackOff.NextBackOff())
return fmt.Errorf("upstream check call error")
}

err := backoff.Retry(operation, exponentialBackOff)
return nil
}, exponentialBackOff)
if err != nil {
log.Warn(err)
return
}

log.Infof("upstreams %s are responsive again. Adding them back to system", u.upstreamServers)
u.failsCount.Store(0)
u.successCount.Add(1)
u.reactivate()
u.disabled = false
}

// isTimeout returns true if the given error is a network timeout error.