refactor: remove redis
hacdias committed Sep 24, 2021
1 parent cfa9277 commit 2912ca8
Showing 18 changed files with 430 additions and 1,335 deletions.
2 changes: 1 addition & 1 deletion Makefile
@@ -15,4 +15,4 @@ docker:
 	docker build -t iptestground/sync-service:latest -f Dockerfile .
 
 test:
-	go test ./...
+	go test -v ./...
108 changes: 108 additions & 0 deletions barrier.go
@@ -0,0 +1,108 @@
package sync

import (
	"context"
	"sync"
)

// barrier represents a single barrier with multiple waiters.
type barrier struct {
	sync.Mutex
	count int
	zcs   []*zeroCounter
}

// wait waits for the barrier to reach a certain target.
func (b *barrier) wait(ctx context.Context, target int) error {
	b.Lock()

	// If we're already at or over the target, return immediately.
	if target <= b.count {
		b.Unlock()
		return nil
	}

	// Create a zero counter that waits for target - count more entries to arrive.
	// Its wait also returns if the context fires.
	zc := newZeroCounter(ctx, target-b.count)

	// TODO: or do it a simpler way, where we just poll every 100ms to see if count
	// has reached the target or the context has fired. Less code and less complexity.
	b.zcs = append(b.zcs, zc)
	b.Unlock()
	return zc.wait()
}

// inc increments the barrier by one unit. To do so, we increment
// the counter and notify every waiter's counter that a new entry arrived.
func (b *barrier) inc() int {
	b.Lock()
	defer b.Unlock()

	b.count += 1
	count := b.count

	for _, zc := range b.zcs {
		zc.dec()
	}

	return count
}

// isDone returns true if all the counters for this barrier have reached zero.
func (b *barrier) isDone() bool {
	b.Lock()
	defer b.Unlock()

	for _, zc := range b.zcs {
		if !zc.done() {
			return false
		}
	}

	return true
}

type zeroCounter struct {
	sync.Mutex
	ctx    context.Context
	ch     chan struct{}
	closed bool
	count  int
}

func newZeroCounter(ctx context.Context, target int) *zeroCounter {
	return &zeroCounter{
		count: target,
		ctx:   ctx,
		ch:    make(chan struct{}),
	}
}

func (w *zeroCounter) dec() {
	w.Lock()
	defer w.Unlock()

	if w.closed {
		return
	}

	w.count -= 1
	if w.count <= 0 {
		w.closed = true
		close(w.ch)
	}
}

func (w *zeroCounter) wait() error {
	select {
	case <-w.ctx.Done():
		return w.ctx.Err()
	case <-w.ch:
		return nil
	}
}

func (w *zeroCounter) done() bool {
	return w.closed
}
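
For context only: a minimal sketch, not part of this commit, showing how the new barrier type might be exercised from inside the same package. The function name, the target of 3, and the one-second timeout are illustrative assumptions.

package sync

import (
	"context"
	"fmt"
	"time"
)

// exampleBarrierUsage is a hypothetical illustration: one goroutine blocks in
// wait until three entries have been signalled via inc, or the context fires.
func exampleBarrierUsage() {
	b := &barrier{}

	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	// The waiter blocks until the barrier count reaches 3.
	done := make(chan error, 1)
	go func() { done <- b.wait(ctx, 3) }()

	// Three participants hit the barrier.
	for i := 0; i < 3; i++ {
		b.inc()
	}

	if err := <-done; err != nil {
		fmt.Println("barrier wait failed:", err)
		return
	}
	fmt.Println("barrier released; isDone:", b.isDone())
}

The sketch works under any interleaving: if all three inc calls land before wait, wait returns immediately because the count already meets the target; otherwise the registered zeroCounter reaches zero and its channel is closed.
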
11 changes: 1 addition & 10 deletions cmd/main.go
@@ -23,24 +23,15 @@ func run() error {
 	ctx, cancel := context.WithCancel(cmd.ProcessContext())
 	defer cancel()
 
-	redisHost := os.Getenv(sync.EnvRedisHost)
-	if redisHost == "" {
-		redisHost = sync.DefaultRedisHost
-	}
-
 	log := logging.S()
 	if os.Getenv("DEBUG") == "true" {
 		logging.SetLevel(zapcore.DebugLevel)
 	}
 
-	service, err := sync.NewRedisService(ctx, log, &sync.RedisConfiguration{
-		Port: 6379,
-		Host: redisHost,
-	})
+	service, err := sync.NewDefaultService(ctx, log)
 	if err != nil {
 		return err
 	}
-	service.EnableBackgroundGC(nil)
 
 	srv, err := sync.NewServer(service, 5050)
 	if err != nil {
2 changes: 1 addition & 1 deletion connection_test.go
@@ -6,7 +6,7 @@ import (
 )
 
 func getConnWithInMemService(ctx context.Context) *connection {
-	service, _ := getInMemService(ctx)
+	service, _ := getDefaultService(ctx)
 
 	conn := &connection{
 		service: service,
1 change: 0 additions & 1 deletion go.mod
@@ -3,7 +3,6 @@ module github.com/testground/sync-service
 go 1.16
 
 require (
-	github.com/go-redis/redis/v7 v7.4.0
 	github.com/google/uuid v1.1.1
 	github.com/hashicorp/go-multierror v1.1.0
 	github.com/testground/testground v0.5.3
118 changes: 0 additions & 118 deletions inmem.go

This file was deleted.

46 changes: 0 additions & 46 deletions inmem_test.go

This file was deleted.

(The remaining 11 changed files are not shown.)
