feat: local backend #20

Open · wants to merge 2 commits into main
32 changes: 27 additions & 5 deletions cmd/sunlight/main.go
@@ -80,7 +80,7 @@ type Config struct {
// The database must already exist to protect against accidental
// misconfiguration. Create the table with:
//
// $ sqlite3 checkpoints.db "CREATE TABLE checkpoints (logID BLOB PRIMARY KEY, checkpoint TEXT)"
// $ sqlite3 checkpoints.db "CREATE TABLE checkpoints (logID BLOB PRIMARY KEY, body TEXT)"
//
Checkpoints string

@@ -181,6 +181,14 @@ type LogConfig struct {
// going to be treated like a directory in many tools using S3.
S3KeyPrefix string

// LocalBackend is the path to the directory where backend data will be
// saved.
//
// Not meant to be used in production, only for testing and development purposes.
//
// Cannot be used at the same time as the S3 bucket.
LocalBackend string

// NotAfterStart is the start of the validity range for certificates
// accepted by this log instance, as an RFC 3339 date.
NotAfterStart string
@@ -306,10 +314,24 @@ func main() {
slog.String("log", lc.ShortName),
}))

b, err := ctlog.NewS3Backend(ctx, lc.S3Region, lc.S3Bucket, lc.S3Endpoint, lc.S3KeyPrefix, logger)
if err != nil {
logger.Error("failed to create backend", "err", err)
os.Exit(1)
var b ctlog.Backend
if lc.LocalBackend != "" {
if lc.S3Bucket != "" || lc.S3Region != "" || lc.S3Endpoint != "" || lc.S3KeyPrefix != "" {
logger.Error("local backend cannot be used with S3")
os.Exit(1)
}

b, err = ctlog.NewLocalBackend(lc.LocalBackend)
if err != nil {
logger.Error("failed to create backend", "err", err)
os.Exit(1)
}
} else {
b, err = ctlog.NewS3Backend(ctx, lc.S3Region, lc.S3Bucket, lc.S3Endpoint, lc.S3KeyPrefix, logger)
if err != nil {
logger.Error("failed to create backend", "err", err)
os.Exit(1)
}
}

r := x509util.NewPEMCertPool()
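A possible follow-up for the backend selection above (a reviewer sketch, not part of this change): the mutual-exclusion check and the two constructors could be folded into one small helper in cmd/sunlight/main.go. The helper below is hypothetical; it only reuses identifiers that already appear in this diff and assumes main.go's existing imports (context, fmt, log/slog).

```go
// newBackend is a hypothetical helper, sketched for illustration only.
// It picks the local filesystem backend when LocalBackend is set, and
// otherwise falls back to S3, mirroring the logic added in main().
func newBackend(ctx context.Context, lc LogConfig, logger *slog.Logger) (ctlog.Backend, error) {
	if lc.LocalBackend != "" {
		// The local backend and the S3 settings are mutually exclusive.
		if lc.S3Bucket != "" || lc.S3Region != "" || lc.S3Endpoint != "" || lc.S3KeyPrefix != "" {
			return nil, fmt.Errorf("local backend cannot be used together with S3 settings")
		}
		return ctlog.NewLocalBackend(lc.LocalBackend)
	}
	return ctlog.NewS3Backend(ctx, lc.S3Region, lc.S3Bucket, lc.S3Endpoint, lc.S3KeyPrefix, logger)
}
```

main would then reduce to a single `b, err := newBackend(ctx, lc, logger)` call with one shared error path.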
80 changes: 80 additions & 0 deletions internal/ctlog/local.go
@@ -0,0 +1,80 @@
package ctlog

import (
"context"
"encoding/base64"
"fmt"
"os"
"path"
"sync"

"github.com/prometheus/client_golang/prometheus"
)

// LocalFilesystemBackend is a backend that stores key-value pairs in the
// local filesystem. Each value is stored in a file named after the
// base64-encoded key.
//
// It is not meant to be used in production, but rather for testing and
// development purposes.
type LocalFilesystemBackend struct {
mu *sync.RWMutex
rootPath string
}

func NewLocalBackend(path string) (*LocalFilesystemBackend, error) {
if _, err := os.Stat(path); err != nil {
if os.IsNotExist(err) {
if err = os.MkdirAll(path, 0755); err != nil {
return nil, fmt.Errorf("failed to create directory %s: %w", path, err)
}
} else {
return nil, fmt.Errorf("failed to check if directory %s exists: %w", path, err)
}
}

return &LocalFilesystemBackend{
mu: &sync.RWMutex{},
rootPath: path,
}, nil
}

// Upload saves the data associated with the key to the local filesystem.
// Note well: upload options are currently ignored.
func (b *LocalFilesystemBackend) Upload(ctx context.Context, key string, data []byte, opts *UploadOptions) error {
b.mu.Lock()
defer b.mu.Unlock()
filename := keyToFilename(b.rootPath, key)
f, err := os.Create(filename)
if err != nil {
return fmt.Errorf("failed to save key %s to file %s: %w", key, filename, err)
}
defer f.Close()
if _, err := f.Write(data); err != nil {
return fmt.Errorf("failed to write contents of key %s to file %s: %w", key, filename, err)
}
return nil
}

func (b *LocalFilesystemBackend) Fetch(ctx context.Context, key string) ([]byte, error) {
b.mu.RLock()
defer b.mu.RUnlock()

filename := keyToFilename(b.rootPath, key)

data, err := os.ReadFile(filename)
if err != nil {
return []byte{}, fmt.Errorf("failed to read contents of file %s associated with key %s: %w", filename, key, err)
}

return data, nil
}

func (b *LocalFilesystemBackend) Metrics() []prometheus.Collector {
return []prometheus.Collector{}
}

func keyToFilename(rootPath, key string) string {
return path.Join(rootPath, base64.StdEncoding.EncodeToString([]byte(key)))
}
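To pin down the Upload/Fetch contract of the new backend, here is a minimal round-trip test sketch. It is not part of this PR; the file name (a hypothetical internal/ctlog/local_test.go), the key, and the value are made up for illustration, and it relies only on the signatures shown above.

```go
package ctlog

import (
	"bytes"
	"context"
	"testing"
)

// TestLocalBackendRoundTrip is a hypothetical test: it stores a value under a
// key in a temporary directory and expects Fetch to return the same bytes.
func TestLocalBackendRoundTrip(t *testing.T) {
	b, err := NewLocalBackend(t.TempDir())
	if err != nil {
		t.Fatalf("NewLocalBackend: %v", err)
	}
	key, want := "checkpoint", []byte("example contents")
	// Upload options are ignored by the local backend, so nil is fine here.
	if err := b.Upload(context.Background(), key, want, nil); err != nil {
		t.Fatalf("Upload: %v", err)
	}
	got, err := b.Fetch(context.Background(), key)
	if err != nil {
		t.Fatalf("Fetch: %v", err)
	}
	if !bytes.Equal(got, want) {
		t.Fatalf("Fetch returned %q, want %q", got, want)
	}
}
```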
5 changes: 3 additions & 2 deletions internal/ctlog/sqlite.go
@@ -34,7 +34,7 @@ func NewSQLiteBackend(ctx context.Context, path string, l *slog.Logger) (*SQLite

conn, err := sqlite.OpenConn(path, sqlite.OpenFlagsDefault & ^sqlite.SQLITE_OPEN_CREATE)
if err != nil {
return nil, fmt.Errorf(`failed to open SQLite lock database (hint: to avoid misconfiguration, the lock database must be created manually with "CREATE TABLE checkpoints (logID BLOB PRIMARY KEY, checkpoint TEXT)"): %w`, err)
return nil, fmt.Errorf(`failed to open SQLite lock database (hint: to avoid misconfiguration, the lock database must be created manually with "CREATE TABLE checkpoints (logID BLOB PRIMARY KEY, body TEXT)"): %w`, err)
}
if err := sqlitex.ExecTransient(conn, "PRAGMA synchronous = FULL", nil); err != nil {
conn.Close()
@@ -49,6 +49,7 @@ func NewSQLiteBackend(ctx context.Context, path string, l *slog.Logger) (*SQLite
conn: conn,
duration: duration,
log: l,
mu: &sync.Mutex{},
}, nil
}

@@ -78,7 +79,7 @@ func (b *SQLiteBackend) Fetch(ctx context.Context, logID [sha256.Size]byte) (Loc
if body == nil {
return nil, errors.New("checkpoint not found")
}
return &dynamoDBCheckpoint{logID: logID, body: body}, nil
return &sqliteCheckpoint{logID: logID, body: body}, nil
}

func (b *SQLiteBackend) Replace(ctx context.Context, old LockedCheckpoint, new []byte) (LockedCheckpoint, error) {