go.mod (1 addition, 0 deletions)
@@ -211,6 +211,7 @@ require (
 	github.com/butuzov/mirror v1.3.0 // indirect
 	github.com/catenacyber/perfsprint v0.9.1 // indirect
 	github.com/ccojocar/zxcvbn-go v1.0.4 // indirect
+	github.com/cenkalti/backoff/v5 v5.0.3 // indirect
 	github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d // indirect
 	github.com/charithe/durationcheck v0.0.10 // indirect
 	github.com/charmbracelet/colorprofile v0.2.3-0.20250311203215-f60798e515dc // indirect
go.sum (2 additions, 0 deletions)
@@ -1534,6 +1534,8 @@ github.com/ccoveille/go-safecast v1.6.1 h1:Nb9WMDR8PqhnKCVs2sCB+OqhohwO5qaXtCviZ
 github.com/ccoveille/go-safecast v1.6.1/go.mod h1:QqwNjxQ7DAqY0C721OIO9InMk9zCwcsO7tnRuHytad8=
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/cenkalti/backoff/v5 v5.0.3 h1:ZN+IMa753KfX5hd8vVaMixjnqRZ3y8CuJKRKj1xcsSM=
+github.com/cenkalti/backoff/v5 v5.0.3/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw=
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.3.0/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
internal/datastore/common/gc.go (12 additions, 9 deletions)
@@ -5,7 +5,7 @@ import (
 	"fmt"
 	"time"
 
-	"github.com/cenkalti/backoff/v4"
+	"github.com/cenkalti/backoff/v5"
 	"github.com/prometheus/client_golang/prometheus"
 	"github.com/rs/zerolog"
@@ -148,14 +148,17 @@ var MaxGCInterval = 60 * time.Minute
 // StartGarbageCollector loops forever until the context is canceled and
 // performs garbage collection on the provided interval.
 func StartGarbageCollector(ctx context.Context, collectable GarbageCollectableDatastore, interval, window, timeout time.Duration) error {
-	return startGarbageCollectorWithMaxElapsedTime(ctx, collectable, interval, window, 0, timeout, gcFailureCounter)
+	return runPeriodicallyWithBackoff(ctx, func() error {
+		gcCtx, cancel := context.WithTimeout(ctx, timeout)
+		defer cancel()
+		return RunGarbageCollection(gcCtx, collectable, window)
+	}, interval, window, timeout, gcFailureCounter)
 }
 
-func startGarbageCollectorWithMaxElapsedTime(ctx context.Context, collectable GarbageCollectableDatastore, interval, window, maxElapsedTime, timeout time.Duration, failureCounter prometheus.Counter) error {
+func runPeriodicallyWithBackoff(ctx context.Context, taskFn func() error, interval, window, timeout time.Duration, failureCounter prometheus.Counter) error {
 	backoffInterval := backoff.NewExponentialBackOff()
 	backoffInterval.InitialInterval = interval
 	backoffInterval.MaxInterval = max(MaxGCInterval, interval)
-	backoffInterval.MaxElapsedTime = maxElapsedTime
 	backoffInterval.Reset()
 
 	nextInterval := interval
@@ -178,7 +181,10 @@ func startGarbageCollectorWithMaxElapsedTime(ctx context.Context, collectable Ga
 				Dur("timeout", timeout).
 				Msg("running garbage collection worker")
 
-			err := RunGarbageCollection(collectable, window, timeout)
+			// NOTE: we're okay using the parent context here because the
+			// callers of this function create a dedicated garbage collection
+			// context anyway, which is only cancelled when the ds is closed.
+			err := taskFn()
 			if err != nil {
 				failureCounter.Inc()
 				nextInterval = backoffInterval.NextBackOff()
@@ -199,10 +205,7 @@ func startGarbageCollectorWithMaxElapsedTime(ctx context.Context, collectable Ga
 }
 
 // RunGarbageCollection runs garbage collection for the datastore.
-func RunGarbageCollection(collectable GarbageCollectableDatastore, window, timeout time.Duration) error {
-	ctx, cancel := context.WithTimeout(context.Background(), timeout)
-	defer cancel()
-
+func RunGarbageCollection(ctx context.Context, collectable GarbageCollectableDatastore, window time.Duration) error {
 	ctx, span := tracer.Start(ctx, "RunGarbageCollection")
 	defer span.End()
 
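A note on the dependency bump driving this refactor: backoff/v5 removes the MaxElapsedTime field from ExponentialBackOff (elapsed-time limits moved into the library's retry helpers), which is why the maxElapsedTime parameter disappears above and the worker now stops only through context cancellation. Below is a minimal, self-contained sketch of the resulting loop shape, using only v5 calls (NewExponentialBackOff, NextBackOff, Reset); the helper name runWithBackoff and the durations are illustrative, not code from this PR.

	package main

	import (
		"context"
		"errors"
		"time"

		"github.com/cenkalti/backoff/v5"
	)

	// runWithBackoff is a hypothetical stand-in for runPeriodicallyWithBackoff:
	// run taskFn once per interval, stretch the wait after each failure, and
	// snap back to the base interval after a success.
	func runWithBackoff(ctx context.Context, taskFn func() error, interval, maxInterval time.Duration) error {
		bo := backoff.NewExponentialBackOff()
		bo.InitialInterval = interval
		bo.MaxInterval = maxInterval
		bo.Reset()

		next := interval
		for {
			select {
			case <-ctx.Done():
				// With no MaxElapsedTime field in v5, cancellation is the only exit.
				return ctx.Err()
			case <-time.After(next):
				if err := taskFn(); err != nil {
					// Failure: wait longer next time (with jitter), up to MaxInterval.
					next = bo.NextBackOff()
					continue
				}
				// Success: reset the backoff and return to the base cadence.
				bo.Reset()
				next = interval
			}
		}
	}

	func main() {
		ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
		defer cancel()
		_ = runWithBackoff(ctx, func() error { return errors.New("flaky") }, 500*time.Millisecond, 2*time.Second)
	}

The Reset-on-success step is what TestGCFailureBackoffReset below asserts: a transient failure must not permanently stretch the GC cadence.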
internal/datastore/common/gc_test.go (68 additions, 55 deletions)
@@ -2,15 +2,18 @@ package common
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"slices"
 	"sync"
 	"testing"
+	"testing/synctest"
 	"time"
 
 	"github.com/prometheus/client_golang/prometheus"
 	promclient "github.com/prometheus/client_model/go"
 	"github.com/stretchr/testify/require"
+	"go.uber.org/goleak"
 
 	"github.com/authzed/spicedb/internal/datastore/revisions"
 	"github.com/authzed/spicedb/pkg/datastore"
@@ -181,50 +184,45 @@ func (d revisionErrorDeleter) DeleteExpiredRels() (int64, error) {
 	return 0, nil
 }
 
+func alwaysErr() error {
+	return errors.New("aaagh")
+}
+
 func TestGCFailureBackoff(t *testing.T) {
+	t.Cleanup(func() {
+		goleak.VerifyNone(t)
+	})
 	localCounter := prometheus.NewCounter(gcFailureCounterConfig)
 	reg := prometheus.NewRegistry()
 	require.NoError(t, reg.Register(localCounter))
 
-	ctx, cancel := context.WithCancel(t.Context())
-	defer cancel()
-	go func() {
-		gc := newFakeGCStore(alwaysErrorDeleter{})
-		require.Error(t, startGarbageCollectorWithMaxElapsedTime(ctx, gc, 100*time.Millisecond, 1*time.Second, 1*time.Nanosecond, 1*time.Minute, localCounter))
-	}()
-	time.Sleep(200 * time.Millisecond)
-	cancel()
+	errCh := make(chan error, 1)
+	synctest.Test(t, func(t *testing.T) {
+		duration := 1000 * time.Second
+		ctx, cancel := context.WithTimeout(t.Context(), duration)
+		t.Cleanup(func() {
+			cancel()
+		})
+		go func() {
+			errCh <- runPeriodicallyWithBackoff(ctx, alwaysErr, 100*time.Second, 1*time.Second, 1*time.Minute, localCounter)
+		}()
+		time.Sleep(duration)
+		synctest.Wait()
+	})
+	require.Error(t, <-errCh)
 
 	metrics, err := reg.Gather()
 	require.NoError(t, err)
 	var mf *promclient.MetricFamily
 	for _, metric := range metrics {
 		if metric.GetName() == "spicedb_datastore_gc_failure_total" {
 			mf = metric
 			break
 		}
 	}
-	require.Greater(t, *(mf.GetMetric()[0].Counter.Value), 100.0, "MaxElapsedTime=1ns did not cause backoff to get ignored")
 
-	localCounter = prometheus.NewCounter(gcFailureCounterConfig)
-	reg = prometheus.NewRegistry()
-	require.NoError(t, reg.Register(localCounter))
-	ctx, cancel = context.WithCancel(t.Context())
-	defer cancel()
-	go func() {
-		gc := newFakeGCStore(alwaysErrorDeleter{})
-		require.Error(t, startGarbageCollectorWithMaxElapsedTime(ctx, gc, 100*time.Millisecond, 0, 1*time.Second, 1*time.Minute, localCounter))
-	}()
-	time.Sleep(200 * time.Millisecond)
-	cancel()
-
-	metrics, err = reg.Gather()
-	require.NoError(t, err)
-	for _, metric := range metrics {
-		if metric.GetName() == "spicedb_datastore_gc_failure_total" {
-			mf = metric
-		}
-	}
-	require.Less(t, *(mf.GetMetric()[0].Counter.Value), 3.0, "MaxElapsedTime=0 should have not caused backoff to get ignored")
+	// We expect about 5 failures; the behavior of the library means that there's
+	// some wiggle room here (owing to the jitter in the backoff).
+	require.Greater(t, *(mf.GetMetric()[0].Counter.Value), 3.0, "did not see expected number of backoffs")
 }
 
 // Ensure the garbage collector interval is reset after recovering from an
@@ -238,19 +236,25 @@ func TestGCFailureBackoffReset(t *testing.T) {
 		errorOnRevisions: []uint64{1, 2, 3, 4, 5},
 	})
 
-	ctx, cancel := context.WithCancel(t.Context())
-	defer cancel()
-
-	go func() {
-		interval := 10 * time.Millisecond
-		window := 10 * time.Second
-		timeout := 1 * time.Minute
-
-		require.Error(t, StartGarbageCollector(ctx, gc, interval, window, timeout))
-	}()
+	errCh := make(chan error, 1)
+	synctest.Test(t, func(t *testing.T) {
+		ctx, cancel := context.WithCancel(t.Context())
+		t.Cleanup(func() {
+			cancel()
+		})
+		go func() {
+			interval := 10 * time.Millisecond
+			window := 10 * time.Second
+			timeout := 1 * time.Minute
+
+			errCh <- StartGarbageCollector(ctx, gc, interval, window, timeout)
+		}()
+		time.Sleep(500 * time.Millisecond)
+		cancel()
+		synctest.Wait()
+	})
 
-	time.Sleep(500 * time.Millisecond)
-	cancel()
+	require.Error(t, <-errCh)
 
 	// The next interval should have been reset after recovering from the error.
 	// If it is not reset, the last exponential backoff interval will not give
@@ -264,20 +268,29 @@ func TestGCUnlockOnTimeout(t *testing.T) {
 func TestGCUnlockOnTimeout(t *testing.T) {
 	gc := newFakeGCStore(alwaysErrorDeleter{})
 
-	ctx, cancel := context.WithCancel(t.Context())
-	defer cancel()
-
-	go func() {
-		interval := 10 * time.Millisecond
-		window := 10 * time.Second
-		timeout := 1 * time.Millisecond
-
-		require.Error(t, StartGarbageCollector(ctx, gc, interval, window, timeout))
-	}()
-
-	time.Sleep(30 * time.Millisecond)
-	require.False(t, gc.HasGCRun(), "GC should not have run")
+	errCh := make(chan error, 1)
+	hasRunChan := make(chan bool, 1)
+	synctest.Test(t, func(t *testing.T) {
+		ctx, cancel := context.WithCancel(t.Context())
+		t.Cleanup(func() {
+			cancel()
+		})
+		go func() {
+			interval := 10 * time.Millisecond
+			window := 10 * time.Second
+			timeout := 1 * time.Minute
+
+			errCh <- StartGarbageCollector(ctx, gc, interval, window, timeout)
+		}()
+		time.Sleep(30 * time.Millisecond)
+		hasRunChan <- gc.HasGCRun()
+		cancel()
+		synctest.Wait()
+	})
+	require.Error(t, <-errCh)
+	require.False(t, <-hasRunChan, "GC should not have run")
 
+	// TODO: should this be inside the goroutine as well?
 	gc.fakeGC.lock.Lock()
 	defer gc.fakeGC.lock.Unlock()
 
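A note on the test rewrite: these tests now run inside a bubble created by testing/synctest (stable since Go 1.25). Inside a bubble the clock is virtual: time.Sleep advances it instantly once every goroutine in the bubble is durably blocked, and synctest.Wait blocks until all other bubble goroutines are durably blocked or finished. That is why TestGCFailureBackoff can sleep for 1000 virtual seconds without slowing the suite, and why goleak.VerifyNone can then assert that no worker goroutine outlived the test. A toy example of the fake clock, not taken from this PR:

	package example

	import (
		"testing"
		"testing/synctest"
		"time"
	)

	func TestFakeClock(t *testing.T) {
		synctest.Test(t, func(t *testing.T) {
			done := make(chan struct{})
			go func() {
				// Advances the bubble's fake clock; no real waiting happens.
				time.Sleep(1000 * time.Second)
				close(done)
			}()
			// Sleep past the goroutine's wake-up time on the same fake clock.
			time.Sleep(1001 * time.Second)
			// Block until every other goroutine in the bubble is done or blocked.
			synctest.Wait()
			select {
			case <-done:
				// Expected: the goroutine's 1000s sleep elapsed virtually.
			default:
				t.Fatal("goroutine should have finished on the fake clock")
			}
		})
	}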