diff --git a/Earthfile b/Earthfile index d6b6fe12b..69fea3850 100644 --- a/Earthfile +++ b/Earthfile @@ -86,6 +86,7 @@ bench: COPY (+sources/*) /src WORKDIR /src/components/ledger/internal/storage/ledgerstore ARG numberOfTransactions=10000 + ARG ledgers=10 ARG benchTime=1s ARG count=1 ARG GOPROXY @@ -104,6 +105,7 @@ bench: go test -timeout $testTimeout -bench=$bench -run ^$ $additionalArgs \ -benchtime=$benchTime \ -count=$count \ + -ledgers=$ledgers \ -transactions=$numberOfTransactions | tee -a /results.txt END RUN benchstat /results.txt diff --git a/cmd/buckets.go b/cmd/buckets.go new file mode 100644 index 000000000..400581b57 --- /dev/null +++ b/cmd/buckets.go @@ -0,0 +1,72 @@ +package cmd + +import ( + "github.com/formancehq/ledger/internal/storage" + "github.com/formancehq/ledger/internal/storage/driver" + "github.com/formancehq/stack/libs/go-libs/logging" + "github.com/formancehq/stack/libs/go-libs/service" + "github.com/spf13/cobra" + "github.com/spf13/viper" +) + +func NewBucket() *cobra.Command { + return &cobra.Command{ + Use: "buckets", + Aliases: []string{"storage"}, + } +} + +func NewBucketUpgrade() *cobra.Command { + cmd := &cobra.Command{ + Use: "upgrade", + Args: cobra.ExactArgs(1), + SilenceUsage: true, + RunE: func(cmd *cobra.Command, args []string) error { + + driver := driver.New(storage.ConnectionOptionsFromFlags(viper.GetViper(), cmd.OutOrStdout(), viper.GetBool(service.DebugFlag))) + if err := driver.Initialize(cmd.Context()); err != nil { + return err + } + defer func() { + _ = driver.Close() + }() + + name := args[0] + + bucket, err := driver.OpenBucket(name) + if err != nil { + return err + } + + logger := service.GetDefaultLogger(cmd.OutOrStdout(), viper.GetBool(service.DebugFlag), false) + + return bucket.Migrate(logging.ContextWithLogger(cmd.Context(), logger)) + }, + } + return cmd +} + +func NewBucketUpgradeAll() *cobra.Command { + cmd := &cobra.Command{ + Use: "upgrade-all", + Args: cobra.ExactArgs(0), + SilenceUsage: true, + RunE: 
func(cmd *cobra.Command, args []string) error { + + logger := service.GetDefaultLogger(cmd.OutOrStdout(), viper.GetBool(service.DebugFlag), false) + ctx := logging.ContextWithLogger(cmd.Context(), logger) + + driver := driver.New(storage.ConnectionOptionsFromFlags(viper.GetViper(), cmd.OutOrStdout(), viper.GetBool(service.DebugFlag))) + defer func() { + _ = driver.Close() + }() + + if err := driver.Initialize(ctx); err != nil { + return err + } + + return driver.UpgradeAllBuckets(ctx) + }, + } + return cmd +} diff --git a/cmd/config.go b/cmd/config.go deleted file mode 100644 index df7794c1b..000000000 --- a/cmd/config.go +++ /dev/null @@ -1,9 +0,0 @@ -package cmd - -import "github.com/spf13/cobra" - -func NewConfig() *cobra.Command { - return &cobra.Command{ - Use: "config", - } -} diff --git a/cmd/config_init.go b/cmd/config_init.go deleted file mode 100644 index e2f9939b3..000000000 --- a/cmd/config_init.go +++ /dev/null @@ -1,19 +0,0 @@ -package cmd - -import ( - "github.com/sirupsen/logrus" - "github.com/spf13/cobra" - "github.com/spf13/viper" -) - -func NewConfigInit() *cobra.Command { - return &cobra.Command{ - Use: "init", - Run: func(cmd *cobra.Command, args []string) { - err := viper.SafeWriteConfig() - if err != nil { - logrus.Println(err) - } - }, - } -} diff --git a/cmd/root.go b/cmd/root.go index a3fc943d7..30bab3dd7 100644 --- a/cmd/root.go +++ b/cmd/root.go @@ -37,18 +37,12 @@ func NewRootCommand() *cobra.Command { serve := NewServe() version := NewVersion() - conf := NewConfig() - conf.AddCommand(NewConfigInit()) - store := NewStorage() - store.AddCommand(NewStorageInit()) - store.AddCommand(NewStorageList()) - store.AddCommand(NewStorageUpgrade()) - store.AddCommand(NewStorageUpgradeAll()) - store.AddCommand(NewStorageDelete()) + buckets := NewBucket() + buckets.AddCommand(NewBucketUpgrade()) + buckets.AddCommand(NewBucketUpgradeAll()) root.AddCommand(serve) - root.AddCommand(conf) - root.AddCommand(store) + root.AddCommand(buckets) 
root.AddCommand(version) root.AddCommand(NewDocCommand()) diff --git a/cmd/serve.go b/cmd/serve.go index da37646af..9dfd1b8ec 100644 --- a/cmd/serve.go +++ b/cmd/serve.go @@ -40,7 +40,7 @@ func NewServe() *cobra.Command { fx.Invoke(func(lc fx.Lifecycle, driver *driver.Driver) { if viper.GetBool(autoUpgradeFlag) { lc.Append(fx.Hook{ - OnStart: driver.UpgradeAllLedgersSchemas, + OnStart: driver.UpgradeAllBuckets, }) } }), diff --git a/cmd/storage.go b/cmd/storage.go deleted file mode 100644 index eacb83e5a..000000000 --- a/cmd/storage.go +++ /dev/null @@ -1,196 +0,0 @@ -package cmd - -import ( - "context" - "errors" - - "github.com/formancehq/ledger/internal/storage" - "github.com/formancehq/ledger/internal/storage/driver" - "github.com/formancehq/ledger/internal/storage/ledgerstore" - "github.com/formancehq/stack/libs/go-libs/logging" - "github.com/formancehq/stack/libs/go-libs/service" - "github.com/spf13/cobra" - "github.com/spf13/viper" - "go.uber.org/fx" -) - -func NewStorage() *cobra.Command { - return &cobra.Command{ - Use: "storage", - } -} - -func NewStorageInit() *cobra.Command { - cmd := &cobra.Command{ - Use: "init", - RunE: func(cmd *cobra.Command, args []string) error { - app := service.New( - cmd.OutOrStdout(), - resolveOptions( - cmd.OutOrStdout(), - fx.Invoke(func(storageDriver *driver.Driver, lc fx.Lifecycle) { - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - name := viper.GetString("name") - if name == "" { - return errors.New("name is empty") - } - - exists, err := storageDriver.GetSystemStore().Exists(ctx, name) - if err != nil { - return err - } - - if exists { - return errors.New("ledger already exists") - } - - store, err := storageDriver.CreateLedgerStore(ctx, name) - if err != nil { - return err - } - - _, err = store.Migrate(ctx) - return err - }, - }) - }))..., - ) - return app.Start(cmd.Context()) - }, - } - cmd.Flags().String("name", "default", "Ledger name") - if err := viper.BindPFlags(cmd.Flags()); err != nil { - 
panic(err) - } - return cmd -} - -func NewStorageList() *cobra.Command { - cmd := &cobra.Command{ - Use: "list", - RunE: func(cmd *cobra.Command, args []string) error { - app := service.New(cmd.OutOrStdout(), - resolveOptions( - cmd.OutOrStdout(), - fx.Invoke(func(storageDriver *driver.Driver, lc fx.Lifecycle) { - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - ledgers, err := storageDriver.GetSystemStore().ListLedgers(ctx) - if err != nil { - return err - } - if len(ledgers) == 0 { - logging.FromContext(ctx).Info("No ledger found.") - return nil - } - logging.FromContext(ctx).Infof("Ledgers: %v", ledgers) - return nil - }, - }) - }), - )..., - ) - return app.Start(cmd.Context()) - }, - } - return cmd -} - -func upgradeStore(ctx context.Context, store *ledgerstore.Store, name string) error { - modified, err := store.Migrate(ctx) - if err != nil { - return err - } - - if modified { - logging.FromContext(ctx).Infof("Storage '%s' upgraded", name) - } else { - logging.FromContext(ctx).Infof("Storage '%s' is up to date", name) - } - return nil -} - -func NewStorageUpgrade() *cobra.Command { - cmd := &cobra.Command{ - Use: "upgrade", - Args: cobra.ExactArgs(1), - SilenceUsage: true, - RunE: func(cmd *cobra.Command, args []string) error { - - driver := driver.New(storage.ConnectionOptionsFromFlags(viper.GetViper(), cmd.OutOrStdout(), viper.GetBool(service.DebugFlag))) - if err := driver.Initialize(cmd.Context()); err != nil { - return err - } - defer func() { - _ = driver.Close() - }() - - name := args[0] - store, err := driver.GetLedgerStore(cmd.Context(), name) - if err != nil { - return err - } - logger := service.GetDefaultLogger(cmd.OutOrStdout(), viper.GetBool(service.DebugFlag), false) - - return upgradeStore(logging.ContextWithLogger(cmd.Context(), logger), store, name) - }, - } - return cmd -} - -func NewStorageUpgradeAll() *cobra.Command { - cmd := &cobra.Command{ - Use: "upgrade-all", - Args: cobra.ExactArgs(0), - SilenceUsage: true, - RunE: 
func(cmd *cobra.Command, args []string) error { - - logger := service.GetDefaultLogger(cmd.OutOrStdout(), viper.GetBool(service.DebugFlag), false) - ctx := logging.ContextWithLogger(cmd.Context(), logger) - - driver := driver.New(storage.ConnectionOptionsFromFlags(viper.GetViper(), cmd.OutOrStdout(), viper.GetBool(service.DebugFlag))) - defer func() { - _ = driver.Close() - }() - - if err := driver.Initialize(ctx); err != nil { - return err - } - - return driver.UpgradeAllLedgersSchemas(ctx) - }, - } - return cmd -} - -func NewStorageDelete() *cobra.Command { - cmd := &cobra.Command{ - Use: "delete", - Args: cobra.ExactArgs(1), - RunE: func(cmd *cobra.Command, args []string) error { - app := service.New( - cmd.OutOrStdout(), - resolveOptions( - cmd.OutOrStdout(), - fx.Invoke(func(storageDriver *driver.Driver, lc fx.Lifecycle) { - lc.Append(fx.Hook{ - OnStart: func(ctx context.Context) error { - name := args[0] - store, err := storageDriver.GetLedgerStore(ctx, name) - if err != nil { - return err - } - if err := store.Delete(ctx); err != nil { - return err - } - return nil - }, - }) - }))..., - ) - return app.Start(cmd.Context()) - }, - } - return cmd -} diff --git a/internal/analytics/analytics.go b/internal/analytics/analytics.go index cee3078bf..06046cdd9 100644 --- a/internal/analytics/analytics.go +++ b/internal/analytics/analytics.go @@ -7,6 +7,10 @@ import ( "runtime" "time" + "github.com/formancehq/ledger/internal/storage/paginate" + "github.com/formancehq/ledger/internal/storage/systemstore" + "github.com/formancehq/stack/libs/go-libs/api" + storageerrors "github.com/formancehq/ledger/internal/storage/sqlutils" ledger "github.com/formancehq/ledger/internal" @@ -90,44 +94,49 @@ func (m *heartbeat) enqueue(ctx context.Context) error { Set(CPUCountProperty, runtime.NumCPU()). 
Set(TotalMemoryProperty, memory.TotalMemory()/1024/1024) - ledgers, err := m.backend.ListLedgers(ctx) - if err != nil { - return err - } - ledgersProperty := map[string]any{} - - for _, l := range ledgers { - stats := map[string]any{} - if err := func() error { - store, err := m.backend.GetLedgerStore(ctx, l) - if err != nil && err != storageerrors.ErrStoreNotFound { - return err - } - - transactions, err := store.CountTransactions(ctx) - if err != nil { - return err + err = paginate.Iterate(ctx, systemstore.NewListLedgersQuery(10), + func(ctx context.Context, q systemstore.ListLedgersQuery) (*api.Cursor[systemstore.Ledger], error) { + return m.backend.ListLedgers(ctx, q) + }, + func(cursor *api.Cursor[systemstore.Ledger]) error { + for _, l := range cursor.Data { + stats := map[string]any{} + if err := func() error { + store, err := m.backend.GetLedgerStore(ctx, l.Name) + if err != nil && err != storageerrors.ErrStoreNotFound { + return err + } + + transactions, err := store.CountTransactions(ctx) + if err != nil { + return err + } + + accounts, err := store.CountAccounts(ctx) + if err != nil { + return err + } + stats[TransactionsProperty] = transactions + stats[AccountsProperty] = accounts + + return nil + }(); err != nil { + return err + } + + digest := sha256.New() + digest.Write([]byte(l.Name)) + ledgerHash := base64.RawURLEncoding.EncodeToString(digest.Sum(nil)) + + ledgersProperty[ledgerHash] = stats } - - accounts, err := store.CountAccounts(ctx) - if err != nil { - return err - } - stats[TransactionsProperty] = transactions - stats[AccountsProperty] = accounts - return nil - }(); err != nil { - return err - } - - digest := sha256.New() - digest.Write([]byte(l)) - ledgerHash := base64.RawURLEncoding.EncodeToString(digest.Sum(nil)) - - ledgersProperty[ledgerHash] = stats + }) + if err != nil { + return err } + if len(ledgersProperty) > 0 { properties.Set(LedgersProperty, ledgersProperty) } diff --git a/internal/analytics/analytics_test.go 
b/internal/analytics/analytics_test.go index a0c4fc07e..c51b1e7e5 100644 --- a/internal/analytics/analytics_test.go +++ b/internal/analytics/analytics_test.go @@ -11,6 +11,10 @@ import ( "testing" "time" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" + + "github.com/formancehq/ledger/internal/storage/systemstore" + "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" "gopkg.in/segmentio/analytics-go.v3" @@ -124,9 +128,15 @@ func TestAnalytics(t *testing.T) { backend := NewMockBackend(ctrl) backend. EXPECT(). - ListLedgers(gomock.Any()). + ListLedgers(gomock.Any(), gomock.Any()). AnyTimes(). - Return([]string{"default"}, nil) + Return(&sharedapi.Cursor[systemstore.Ledger]{ + Data: []systemstore.Ledger{ + { + Name: "default", + }, + }, + }, nil) backend. EXPECT(). AppID(gomock.Any()). diff --git a/internal/analytics/backend.go b/internal/analytics/backend.go index 5063a3b81..4b9bf6444 100644 --- a/internal/analytics/backend.go +++ b/internal/analytics/backend.go @@ -3,6 +3,10 @@ package analytics import ( "context" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" + + "github.com/formancehq/ledger/internal/storage/systemstore" + storageerrors "github.com/formancehq/ledger/internal/storage/sqlutils" "github.com/formancehq/ledger/internal/storage/driver" @@ -34,7 +38,7 @@ var _ Ledger = (*defaultLedger)(nil) type Backend interface { AppID(ctx context.Context) (string, error) - ListLedgers(ctx context.Context) ([]string, error) + ListLedgers(ctx context.Context, query systemstore.ListLedgersQuery) (*sharedapi.Cursor[systemstore.Ledger], error) GetLedgerStore(ctx context.Context, l string) (Ledger, error) } @@ -60,17 +64,19 @@ func (d defaultBackend) AppID(ctx context.Context) (string, error) { return d.appID, nil } -func (d defaultBackend) ListLedgers(ctx context.Context) ([]string, error) { - return d.driver.GetSystemStore().ListLedgers(ctx) +func (d defaultBackend) ListLedgers(ctx context.Context, query systemstore.ListLedgersQuery) 
(*sharedapi.Cursor[systemstore.Ledger], error) { + return d.driver.GetSystemStore().ListLedgers(ctx, query) } func (d defaultBackend) GetLedgerStore(ctx context.Context, name string) (Ledger, error) { - ledgerStore, err := d.driver.GetLedgerStore(ctx, name) + + store, err := d.driver.GetLedgerStore(ctx, name) if err != nil { return nil, err } + return &defaultLedger{ - store: ledgerStore, + store: store, }, nil } diff --git a/internal/analytics/backend_test.go b/internal/analytics/backend_test.go index 5040d0fce..4d0f64511 100644 --- a/internal/analytics/backend_test.go +++ b/internal/analytics/backend_test.go @@ -12,6 +12,8 @@ import ( context "context" reflect "reflect" + systemstore "github.com/formancehq/ledger/internal/storage/systemstore" + api "github.com/formancehq/stack/libs/go-libs/api" gomock "go.uber.org/mock/gomock" ) @@ -122,16 +124,16 @@ func (mr *MockBackendMockRecorder) GetLedgerStore(ctx, l any) *gomock.Call { } // ListLedgers mocks base method. -func (m *MockBackend) ListLedgers(ctx context.Context) ([]string, error) { +func (m *MockBackend) ListLedgers(ctx context.Context, query systemstore.ListLedgersQuery) (*api.Cursor[systemstore.Ledger], error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListLedgers", ctx) - ret0, _ := ret[0].([]string) + ret := m.ctrl.Call(m, "ListLedgers", ctx, query) + ret0, _ := ret[0].(*api.Cursor[systemstore.Ledger]) ret1, _ := ret[1].(error) return ret0, ret1 } // ListLedgers indicates an expected call of ListLedgers. 
-func (mr *MockBackendMockRecorder) ListLedgers(ctx any) *gomock.Call { +func (mr *MockBackendMockRecorder) ListLedgers(ctx, query any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListLedgers", reflect.TypeOf((*MockBackend)(nil).ListLedgers), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListLedgers", reflect.TypeOf((*MockBackend)(nil).ListLedgers), ctx, query) } diff --git a/internal/api/backend/backend.go b/internal/api/backend/backend.go index 5d873fcb0..a229b9e01 100644 --- a/internal/api/backend/backend.go +++ b/internal/api/backend/backend.go @@ -9,7 +9,8 @@ import ( "github.com/formancehq/ledger/internal/engine/command" "github.com/formancehq/ledger/internal/storage/driver" "github.com/formancehq/ledger/internal/storage/ledgerstore" - "github.com/formancehq/stack/libs/go-libs/api" + "github.com/formancehq/ledger/internal/storage/systemstore" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" "github.com/formancehq/stack/libs/go-libs/metadata" "github.com/formancehq/stack/libs/go-libs/migrations" ) @@ -18,14 +19,14 @@ import ( type Ledger interface { GetAccountWithVolumes(ctx context.Context, query ledgerstore.GetAccountQuery) (*ledger.ExpandedAccount, error) - GetAccountsWithVolumes(ctx context.Context, query ledgerstore.GetAccountsQuery) (*api.Cursor[ledger.ExpandedAccount], error) + GetAccountsWithVolumes(ctx context.Context, query ledgerstore.GetAccountsQuery) (*sharedapi.Cursor[ledger.ExpandedAccount], error) CountAccounts(ctx context.Context, query ledgerstore.GetAccountsQuery) (int, error) GetAggregatedBalances(ctx context.Context, q ledgerstore.GetAggregatedBalanceQuery) (ledger.BalancesByAssets, error) GetMigrationsInfo(ctx context.Context) ([]migrations.Info, error) Stats(ctx context.Context) (engine.Stats, error) - GetLogs(ctx context.Context, query ledgerstore.GetLogsQuery) (*api.Cursor[ledger.ChainedLog], error) + GetLogs(ctx context.Context, query 
ledgerstore.GetLogsQuery) (*sharedapi.Cursor[ledger.ChainedLog], error) CountTransactions(ctx context.Context, query ledgerstore.GetTransactionsQuery) (int, error) - GetTransactions(ctx context.Context, query ledgerstore.GetTransactionsQuery) (*api.Cursor[ledger.ExpandedTransaction], error) + GetTransactions(ctx context.Context, query ledgerstore.GetTransactionsQuery) (*sharedapi.Cursor[ledger.ExpandedTransaction], error) GetTransactionWithVolumes(ctx context.Context, query ledgerstore.GetTransactionQuery) (*ledger.ExpandedTransaction, error) CreateTransaction(ctx context.Context, parameters command.Parameters, data ledger.RunScript) (*ledger.Transaction, error) @@ -37,8 +38,10 @@ type Ledger interface { } type Backend interface { - GetLedger(ctx context.Context, name string) (Ledger, error) - ListLedgers(ctx context.Context) ([]string, error) + GetLedgerEngine(ctx context.Context, name string) (Ledger, error) + GetLedger(ctx context.Context, name string) (*systemstore.Ledger, error) + ListLedgers(ctx context.Context, query systemstore.ListLedgersQuery) (*sharedapi.Cursor[systemstore.Ledger], error) + CreateLedger(ctx context.Context, name string, configuration driver.LedgerConfiguration) error GetVersion() string } @@ -48,12 +51,22 @@ type DefaultBackend struct { version string } -func (d DefaultBackend) GetLedger(ctx context.Context, name string) (Ledger, error) { +func (d DefaultBackend) GetLedger(ctx context.Context, name string) (*systemstore.Ledger, error) { + return d.storageDriver.GetSystemStore().GetLedger(ctx, name) +} + +func (d DefaultBackend) CreateLedger(ctx context.Context, name string, configuration driver.LedgerConfiguration) error { + _, err := d.resolver.CreateLedger(ctx, name, configuration) + + return err +} + +func (d DefaultBackend) GetLedgerEngine(ctx context.Context, name string) (Ledger, error) { return d.resolver.GetLedger(ctx, name) } -func (d DefaultBackend) ListLedgers(ctx context.Context) ([]string, error) { - return 
d.storageDriver.GetSystemStore().ListLedgers(ctx) +func (d DefaultBackend) ListLedgers(ctx context.Context, query systemstore.ListLedgersQuery) (*sharedapi.Cursor[systemstore.Ledger], error) { + return d.storageDriver.GetSystemStore().ListLedgers(ctx, query) } func (d DefaultBackend) GetVersion() string { diff --git a/internal/api/backend/backend_generated.go b/internal/api/backend/backend_generated.go index 7d5880538..7df87e302 100644 --- a/internal/api/backend/backend_generated.go +++ b/internal/api/backend/backend_generated.go @@ -16,7 +16,9 @@ import ( ledger "github.com/formancehq/ledger/internal" engine "github.com/formancehq/ledger/internal/engine" command "github.com/formancehq/ledger/internal/engine/command" + driver "github.com/formancehq/ledger/internal/storage/driver" ledgerstore "github.com/formancehq/ledger/internal/storage/ledgerstore" + systemstore "github.com/formancehq/ledger/internal/storage/systemstore" api "github.com/formancehq/stack/libs/go-libs/api" metadata "github.com/formancehq/stack/libs/go-libs/metadata" migrations "github.com/formancehq/stack/libs/go-libs/migrations" @@ -292,11 +294,25 @@ func (m *MockBackend) EXPECT() *MockBackendMockRecorder { return m.recorder } +// CreateLedger mocks base method. +func (m *MockBackend) CreateLedger(ctx context.Context, name string, configuration driver.LedgerConfiguration) error { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "CreateLedger", ctx, name, configuration) + ret0, _ := ret[0].(error) + return ret0 +} + +// CreateLedger indicates an expected call of CreateLedger. +func (mr *MockBackendMockRecorder) CreateLedger(ctx, name, configuration any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "CreateLedger", reflect.TypeOf((*MockBackend)(nil).CreateLedger), ctx, name, configuration) +} + // GetLedger mocks base method. 
-func (m *MockBackend) GetLedger(ctx context.Context, name string) (Ledger, error) { +func (m *MockBackend) GetLedger(ctx context.Context, name string) (*systemstore.Ledger, error) { m.ctrl.T.Helper() ret := m.ctrl.Call(m, "GetLedger", ctx, name) - ret0, _ := ret[0].(Ledger) + ret0, _ := ret[0].(*systemstore.Ledger) ret1, _ := ret[1].(error) return ret0, ret1 } @@ -307,6 +323,21 @@ func (mr *MockBackendMockRecorder) GetLedger(ctx, name any) *gomock.Call { return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLedger", reflect.TypeOf((*MockBackend)(nil).GetLedger), ctx, name) } +// GetLedgerEngine mocks base method. +func (m *MockBackend) GetLedgerEngine(ctx context.Context, name string) (Ledger, error) { + m.ctrl.T.Helper() + ret := m.ctrl.Call(m, "GetLedgerEngine", ctx, name) + ret0, _ := ret[0].(Ledger) + ret1, _ := ret[1].(error) + return ret0, ret1 +} + +// GetLedgerEngine indicates an expected call of GetLedgerEngine. +func (mr *MockBackendMockRecorder) GetLedgerEngine(ctx, name any) *gomock.Call { + mr.mock.ctrl.T.Helper() + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "GetLedgerEngine", reflect.TypeOf((*MockBackend)(nil).GetLedgerEngine), ctx, name) +} + // GetVersion mocks base method. func (m *MockBackend) GetVersion() string { m.ctrl.T.Helper() @@ -322,16 +353,16 @@ func (mr *MockBackendMockRecorder) GetVersion() *gomock.Call { } // ListLedgers mocks base method. -func (m *MockBackend) ListLedgers(ctx context.Context) ([]string, error) { +func (m *MockBackend) ListLedgers(ctx context.Context, query systemstore.ListLedgersQuery) (*api.Cursor[systemstore.Ledger], error) { m.ctrl.T.Helper() - ret := m.ctrl.Call(m, "ListLedgers", ctx) - ret0, _ := ret[0].([]string) + ret := m.ctrl.Call(m, "ListLedgers", ctx, query) + ret0, _ := ret[0].(*api.Cursor[systemstore.Ledger]) ret1, _ := ret[1].(error) return ret0, ret1 } // ListLedgers indicates an expected call of ListLedgers. 
-func (mr *MockBackendMockRecorder) ListLedgers(ctx any) *gomock.Call { +func (mr *MockBackendMockRecorder) ListLedgers(ctx, query any) *gomock.Call { mr.mock.ctrl.T.Helper() - return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListLedgers", reflect.TypeOf((*MockBackend)(nil).ListLedgers), ctx) + return mr.mock.ctrl.RecordCallWithMethodType(mr.mock, "ListLedgers", reflect.TypeOf((*MockBackend)(nil).ListLedgers), ctx, query) } diff --git a/internal/api/backend/resolver.go b/internal/api/backend/resolver.go index f6bd61324..fe328245c 100644 --- a/internal/api/backend/resolver.go +++ b/internal/api/backend/resolver.go @@ -7,6 +7,8 @@ import ( "sync" "time" + "github.com/formancehq/ledger/internal/storage/sqlutils" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" "github.com/pkg/errors" @@ -70,15 +72,28 @@ func LedgerMiddleware( r = r.WithContext(logging.ContextWithFields(r.Context(), loggerFields)) - l, err := resolver.GetLedger(r.Context(), name) + l, err := resolver.GetLedgerEngine(r.Context(), name) if err != nil { - sharedapi.BadRequest(w, sharedapi.ErrorInternal, err) + switch { + case sqlutils.IsNotFoundError(err): + sharedapi.NotFound(w) + default: + sharedapi.InternalServerError(w, r, err) + } return } + pathWithoutLedger := r.URL.Path[1:] + nextSlash := strings.Index(pathWithoutLedger, "/") + if nextSlash >= 0 { + pathWithoutLedger = pathWithoutLedger[nextSlash:] + } else { + pathWithoutLedger = "" + } + excluded := false for _, path := range excludePathFromSchemaCheck { - if strings.HasSuffix(r.URL.Path, path) { + if pathWithoutLedger == path { excluded = true break } diff --git a/internal/api/router.go b/internal/api/router.go index 6460a6c25..7181b91ba 100644 --- a/internal/api/router.go +++ b/internal/api/router.go @@ -28,7 +28,7 @@ func NewRouter( mux.Use(ReadOnly) } v2Router := v2.NewRouter(backend, healthController, globalMetricsRegistry) - mux.Handle("/v2/*", http.StripPrefix("/v2", http.HandlerFunc(func(w http.ResponseWriter, r 
*http.Request) { + mux.Handle("/v2*", http.StripPrefix("/v2", http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { chi.RouteContext(r.Context()).Reset() v2Router.ServeHTTP(w, r) }))) diff --git a/internal/api/v1/api_utils_test.go b/internal/api/v1/api_utils_test.go index 77896d3c6..f6358295e 100644 --- a/internal/api/v1/api_utils_test.go +++ b/internal/api/v1/api_utils_test.go @@ -3,6 +3,8 @@ package v1_test import ( "testing" + "github.com/formancehq/ledger/internal/storage/systemstore" + "github.com/formancehq/ledger/internal/api/backend" "go.uber.org/mock/gomock" ) @@ -15,6 +17,14 @@ func newTestingBackend(t *testing.T, expectedSchemaCheck bool) (*backend.MockBac EXPECT(). GetLedger(gomock.Any(), gomock.Any()). MinTimes(0). + Return(&systemstore.Ledger{}, nil) + t.Cleanup(func() { + ctrl.Finish() + }) + backend. + EXPECT(). + GetLedgerEngine(gomock.Any(), gomock.Any()). + MinTimes(0). Return(mockLedger, nil) t.Cleanup(func() { ctrl.Finish() diff --git a/internal/api/v1/controllers_config.go b/internal/api/v1/controllers_config.go index f0036becd..b5a7d971d 100644 --- a/internal/api/v1/controllers_config.go +++ b/internal/api/v1/controllers_config.go @@ -1,9 +1,15 @@ package v1 import ( + "context" _ "embed" "net/http" + "github.com/formancehq/ledger/internal/storage/paginate" + + "github.com/formancehq/ledger/internal/storage/systemstore" + "github.com/formancehq/stack/libs/go-libs/collectionutils" + "github.com/formancehq/ledger/internal/api/backend" sharedapi "github.com/formancehq/stack/libs/go-libs/api" ) @@ -25,9 +31,21 @@ type LedgerStorage struct { func getInfo(backend backend.Backend) func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { - ledgers, err := backend.ListLedgers(r.Context()) - if err != nil { - panic(err) + + ledgerNames := make([]string, 0) + if err := paginate.Iterate(r.Context(), systemstore.NewListLedgersQuery(100), + func(ctx context.Context, q systemstore.ListLedgersQuery) 
(*sharedapi.Cursor[systemstore.Ledger], error) { + return backend.ListLedgers(ctx, q) + }, + func(cursor *sharedapi.Cursor[systemstore.Ledger]) error { + ledgerNames = append(ledgerNames, collectionutils.Map(cursor.Data, func(from systemstore.Ledger) string { + return from.Name + })...) + return nil + }, + ); err != nil { + sharedapi.InternalServerError(w, r, err) + return } sharedapi.Ok(w, ConfigInfo{ @@ -36,7 +54,7 @@ func getInfo(backend backend.Backend) func(w http.ResponseWriter, r *http.Reques Config: &LedgerConfig{ LedgerStorage: &LedgerStorage{ Driver: "postgres", - Ledgers: ledgers, + Ledgers: ledgerNames, }, }, }) diff --git a/internal/api/v1/controllers_config_test.go b/internal/api/v1/controllers_config_test.go index c5123def6..0659d02b0 100644 --- a/internal/api/v1/controllers_config_test.go +++ b/internal/api/v1/controllers_config_test.go @@ -1,12 +1,15 @@ package v1_test import ( - "encoding/json" "net/http" "net/http/httptest" "testing" - v2 "github.com/formancehq/ledger/internal/api/v2" + v1 "github.com/formancehq/ledger/internal/api/v1" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" + + "github.com/formancehq/ledger/internal/storage/systemstore" + "github.com/formancehq/ledger/internal/opentelemetry/metrics" "github.com/stretchr/testify/require" "go.uber.org/mock/gomock" @@ -16,12 +19,21 @@ func TestGetInfo(t *testing.T) { t.Parallel() backend, _ := newTestingBackend(t, false) - router := v2.NewRouter(backend, nil, metrics.NewNoOpRegistry()) + router := v1.NewRouter(backend, nil, metrics.NewNoOpRegistry()) backend. EXPECT(). - ListLedgers(gomock.Any()). - Return([]string{"a", "b"}, nil) + ListLedgers(gomock.Any(), gomock.Any()). + Return(&sharedapi.Cursor[systemstore.Ledger]{ + Data: []systemstore.Ledger{ + { + Name: "a", + }, + { + Name: "b", + }, + }, + }, nil) backend. EXPECT(). 
@@ -35,14 +47,13 @@ func TestGetInfo(t *testing.T) { require.Equal(t, http.StatusOK, rec.Code) - info := v2.ConfigInfo{} - require.NoError(t, json.NewDecoder(rec.Body).Decode(&info)) + info, _ := sharedapi.DecodeSingleResponse[v1.ConfigInfo](t, rec.Body) - require.EqualValues(t, v2.ConfigInfo{ + require.EqualValues(t, v1.ConfigInfo{ Server: "ledger", Version: "latest", - Config: &v2.LedgerConfig{ - LedgerStorage: &v2.LedgerStorage{ + Config: &v1.LedgerConfig{ + LedgerStorage: &v1.LedgerStorage{ Driver: "postgres", Ledgers: []string{"a", "b"}, }, diff --git a/internal/api/v1/middleware_auto_create_ledger.go b/internal/api/v1/middleware_auto_create_ledger.go new file mode 100644 index 000000000..ba718ae74 --- /dev/null +++ b/internal/api/v1/middleware_auto_create_ledger.go @@ -0,0 +1,35 @@ +package v1 + +import ( + "net/http" + + "github.com/formancehq/ledger/internal/api/backend" + "github.com/formancehq/ledger/internal/storage/driver" + "github.com/formancehq/ledger/internal/storage/sqlutils" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" + "github.com/go-chi/chi/v5" +) + +func autoCreateMiddleware(backend backend.Backend) func(handler http.Handler) http.Handler { + return func(handler http.Handler) http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + + ledgerName := chi.URLParam(r, "ledger") + if _, err := backend.GetLedger(r.Context(), ledgerName); err != nil { + if !sqlutils.IsNotFoundError(err) { + sharedapi.InternalServerError(w, r, err) + return + } + + if err := backend.CreateLedger(r.Context(), ledgerName, driver.LedgerConfiguration{ + Bucket: ledgerName, + }); err != nil { + sharedapi.InternalServerError(w, r, err) + return + } + } + + handler.ServeHTTP(w, r) + }) + } +} diff --git a/internal/api/v1/routes.go b/internal/api/v1/routes.go index 8d0f113b7..e1346a79d 100644 --- a/internal/api/v1/routes.go +++ b/internal/api/v1/routes.go @@ -38,6 +38,7 @@ func NewRouter(b backend.Backend, healthController 
*health.HealthController, glo handler.ServeHTTP(w, r) }) }) + router.Use(autoCreateMiddleware(b)) router.Use(backend.LedgerMiddleware(b, []string{"/_info"})) // LedgerController diff --git a/internal/api/v2/api_utils_test.go b/internal/api/v2/api_utils_test.go index 81e6e6639..ecc4aa0fc 100644 --- a/internal/api/v2/api_utils_test.go +++ b/internal/api/v2/api_utils_test.go @@ -14,7 +14,7 @@ func newTestingBackend(t *testing.T, expectedSchemaCheck bool) (*backend.MockBac backend := backend.NewMockBackend(ctrl) backend. EXPECT(). - GetLedger(gomock.Any(), gomock.Any()). + GetLedgerEngine(gomock.Any(), gomock.Any()). MinTimes(0). Return(mockLedger, nil) t.Cleanup(func() { diff --git a/internal/api/v2/controllers_config.go b/internal/api/v2/controllers_config.go index a159dd632..783826f3a 100644 --- a/internal/api/v2/controllers_config.go +++ b/internal/api/v2/controllers_config.go @@ -9,36 +9,15 @@ import ( ) type ConfigInfo struct { - Server string `json:"server"` - Version string `json:"version"` - Config *LedgerConfig `json:"config"` -} - -type LedgerConfig struct { - LedgerStorage *LedgerStorage `json:"storage"` -} - -type LedgerStorage struct { - Driver string `json:"driver"` - Ledgers []string `json:"ledgers"` + Server string `json:"server"` + Version string `json:"version"` } func getInfo(backend backend.Backend) func(w http.ResponseWriter, r *http.Request) { return func(w http.ResponseWriter, r *http.Request) { - ledgers, err := backend.ListLedgers(r.Context()) - if err != nil { - panic(err) - } - sharedapi.RawOk(w, ConfigInfo{ Server: "ledger", Version: backend.GetVersion(), - Config: &LedgerConfig{ - LedgerStorage: &LedgerStorage{ - Driver: "postgres", - Ledgers: ledgers, - }, - }, }) } } diff --git a/internal/api/v2/controllers_config_test.go b/internal/api/v2/controllers_config_test.go index 88d74b1cd..884dd63e0 100644 --- a/internal/api/v2/controllers_config_test.go +++ b/internal/api/v2/controllers_config_test.go @@ -9,7 +9,6 @@ import ( v2 
"github.com/formancehq/ledger/internal/api/v2" "github.com/formancehq/ledger/internal/opentelemetry/metrics" "github.com/stretchr/testify/require" - "go.uber.org/mock/gomock" ) func TestGetInfo(t *testing.T) { @@ -18,11 +17,6 @@ func TestGetInfo(t *testing.T) { backend, _ := newTestingBackend(t, false) router := v2.NewRouter(backend, nil, metrics.NewNoOpRegistry()) - backend. - EXPECT(). - ListLedgers(gomock.Any()). - Return([]string{"a", "b"}, nil) - backend. EXPECT(). GetVersion(). @@ -41,11 +35,5 @@ func TestGetInfo(t *testing.T) { require.EqualValues(t, v2.ConfigInfo{ Server: "ledger", Version: "latest", - Config: &v2.LedgerConfig{ - LedgerStorage: &v2.LedgerStorage{ - Driver: "postgres", - Ledgers: []string{"a", "b"}, - }, - }, }, info) } diff --git a/internal/api/v2/controllers_create_ledger.go b/internal/api/v2/controllers_create_ledger.go new file mode 100644 index 000000000..b864e09d5 --- /dev/null +++ b/internal/api/v2/controllers_create_ledger.go @@ -0,0 +1,44 @@ +package v2 + +import ( + "encoding/json" + "io" + "net/http" + + "github.com/formancehq/ledger/internal/storage/driver" + + "github.com/formancehq/ledger/internal/api/backend" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" + "github.com/go-chi/chi/v5" + "github.com/pkg/errors" +) + +func createLedger(b backend.Backend) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + configuration := driver.LedgerConfiguration{} + + data, err := io.ReadAll(r.Body) + if err != nil && !errors.Is(err, io.EOF) { + sharedapi.InternalServerError(w, r, err) + return + } + + if len(data) > 0 { + if err := json.Unmarshal(data, &configuration); err != nil { + sharedapi.BadRequest(w, ErrValidation, err) + return + } + } + + if err := b.CreateLedger(r.Context(), chi.URLParam(r, "ledger"), configuration); err != nil { + switch { + case errors.Is(err, driver.ErrLedgerAlreadyExists): + sharedapi.BadRequest(w, ErrValidation, err) + default: + sharedapi.InternalServerError(w, r, err) + } 
+ return + } + sharedapi.NoContent(w) + } +} diff --git a/internal/api/v2/controllers_create_ledger_test.go b/internal/api/v2/controllers_create_ledger_test.go new file mode 100644 index 000000000..5c98114a4 --- /dev/null +++ b/internal/api/v2/controllers_create_ledger_test.go @@ -0,0 +1,38 @@ +package v2_test + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + "github.com/formancehq/ledger/internal/storage/driver" + + v2 "github.com/formancehq/ledger/internal/api/v2" + "github.com/formancehq/ledger/internal/opentelemetry/metrics" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestConfigureLedger(t *testing.T) { + t.Parallel() + + b, _ := newTestingBackend(t, false) + router := v2.NewRouter(b, nil, metrics.NewNoOpRegistry()) + + name := uuid.NewString() + b. + EXPECT(). + CreateLedger(gomock.Any(), name, driver.LedgerConfiguration{ + Bucket: "bucket0", + }). + Return(nil) + + req := httptest.NewRequest(http.MethodPost, "/"+name, bytes.NewBufferString(`{"bucket": "bucket0"}`)) + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusNoContent, rec.Code) +} diff --git a/internal/api/v2/controllers_get_ledger.go b/internal/api/v2/controllers_get_ledger.go new file mode 100644 index 000000000..59202ebaf --- /dev/null +++ b/internal/api/v2/controllers_get_ledger.go @@ -0,0 +1,45 @@ +package v2 + +import ( + "encoding/json" + "io" + "net/http" + + "github.com/formancehq/ledger/internal/api/backend" + "github.com/formancehq/ledger/internal/storage/driver" + "github.com/formancehq/ledger/internal/storage/sqlutils" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" + "github.com/go-chi/chi/v5" + "github.com/pkg/errors" +) + +func getLedger(b backend.Backend) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + configuration := driver.LedgerConfiguration{} + + data, err := io.ReadAll(r.Body) + if err != nil && !errors.Is(err, 
io.EOF) { + sharedapi.InternalServerError(w, r, err) + return + } + + if len(data) > 0 { + if err := json.Unmarshal(data, &configuration); err != nil { + sharedapi.BadRequest(w, ErrValidation, err) + return + } + } + + ledger, err := b.GetLedger(r.Context(), chi.URLParam(r, "ledger")) + if err != nil { + switch { + case sqlutils.IsNotFoundError(err): + sharedapi.NotFound(w) + default: + sharedapi.InternalServerError(w, r, err) + } + return + } + sharedapi.Ok(w, ledger) + } +} diff --git a/internal/api/v2/controllers_get_ledger_test.go b/internal/api/v2/controllers_get_ledger_test.go new file mode 100644 index 000000000..b56a294df --- /dev/null +++ b/internal/api/v2/controllers_get_ledger_test.go @@ -0,0 +1,45 @@ +package v2_test + +import ( + "net/http" + "net/http/httptest" + "testing" + + ledger "github.com/formancehq/ledger/internal" + "github.com/formancehq/ledger/internal/storage/systemstore" + "github.com/formancehq/stack/libs/go-libs/api" + + v2 "github.com/formancehq/ledger/internal/api/v2" + "github.com/formancehq/ledger/internal/opentelemetry/metrics" + "github.com/google/uuid" + "github.com/stretchr/testify/require" + "go.uber.org/mock/gomock" +) + +func TestGetLedger(t *testing.T) { + t.Parallel() + + b, _ := newTestingBackend(t, false) + router := v2.NewRouter(b, nil, metrics.NewNoOpRegistry()) + + name := uuid.NewString() + now := ledger.Now() + ledger := systemstore.Ledger{ + Name: name, + AddedAt: now, + Bucket: "bucket0", + } + b. + EXPECT(). + GetLedger(gomock.Any(), name). 
+ Return(&ledger, nil) + + req := httptest.NewRequest(http.MethodGet, "/"+name, nil) + rec := httptest.NewRecorder() + + router.ServeHTTP(rec, req) + + require.Equal(t, http.StatusOK, rec.Code) + ledgerFromAPI, _ := api.DecodeSingleResponse[systemstore.Ledger](t, rec.Body) + require.Equal(t, ledger, ledgerFromAPI) +} diff --git a/internal/api/v2/controllers_list_ledgers.go b/internal/api/v2/controllers_list_ledgers.go new file mode 100644 index 000000000..46f997134 --- /dev/null +++ b/internal/api/v2/controllers_list_ledgers.go @@ -0,0 +1,43 @@ +package v2 + +import ( + "fmt" + "net/http" + + "github.com/formancehq/ledger/internal/storage/paginate" + "github.com/formancehq/ledger/internal/storage/systemstore" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" + + "github.com/formancehq/ledger/internal/api/backend" +) + +func listLedgers(b backend.Backend) http.HandlerFunc { + return func(w http.ResponseWriter, r *http.Request) { + + query := systemstore.ListLedgersQuery{} + + if r.URL.Query().Get(QueryKeyCursor) != "" { + err := paginate.UnmarshalCursor(r.URL.Query().Get(QueryKeyCursor), &query) + if err != nil { + sharedapi.BadRequest(w, ErrValidation, fmt.Errorf("invalid '%s' query param", QueryKeyCursor)) + return + } + } else { + pageSize, err := getPageSize(r) + if err != nil { + sharedapi.BadRequest(w, ErrValidation, err) + return + } + + query = systemstore.NewListLedgersQuery(pageSize) + } + + ledgers, err := b.ListLedgers(r.Context(), query) + if err != nil { + sharedapi.InternalServerError(w, r, err) + return + } + + sharedapi.RenderCursor(w, *ledgers) + } +} diff --git a/internal/api/v2/routes.go b/internal/api/v2/routes.go index f9c1c29d7..443ce94e0 100644 --- a/internal/api/v2/routes.go +++ b/internal/api/v2/routes.go @@ -36,36 +36,39 @@ func NewRouter( router.Use(otelchi.Middleware("ledger")) router.Get("/_info", getInfo(b)) + router.Get("/", listLedgers(b)) router.Route("/{ledger}", func(router chi.Router) { - 
router.Use(backend.LedgerMiddleware(b, []string{"/_info"})) + router.Post("/", createLedger(b)) + router.Get("/", getLedger(b)) - router.Post("/_bulk", bulkHandler) + router.With(backend.LedgerMiddleware(b, []string{"/_info"})).Group(func(router chi.Router) { + router.Post("/_bulk", bulkHandler) - // LedgerController - router.Get("/_info", getLedgerInfo) - router.Get("/stats", getStats) - router.Get("/logs", getLogs) + // LedgerController + router.Get("/_info", getLedgerInfo) + router.Get("/stats", getStats) + router.Get("/logs", getLogs) - // AccountController - router.Get("/accounts", getAccounts) - router.Head("/accounts", countAccounts) - router.Get("/accounts/{address}", getAccount) - router.Post("/accounts/{address}/metadata", postAccountMetadata) - router.Delete("/accounts/{address}/metadata/{key}", deleteAccountMetadata) + // AccountController + router.Get("/accounts", getAccounts) + router.Head("/accounts", countAccounts) + router.Get("/accounts/{address}", getAccount) + router.Post("/accounts/{address}/metadata", postAccountMetadata) + router.Delete("/accounts/{address}/metadata/{key}", deleteAccountMetadata) - // TransactionController - router.Get("/transactions", getTransactions) - router.Head("/transactions", countTransactions) + // TransactionController + router.Get("/transactions", getTransactions) + router.Head("/transactions", countTransactions) - router.Post("/transactions", postTransaction) + router.Post("/transactions", postTransaction) - router.Get("/transactions/{id}", getTransaction) - router.Post("/transactions/{id}/revert", revertTransaction) - router.Post("/transactions/{id}/metadata", postTransactionMetadata) - router.Delete("/transactions/{id}/metadata/{key}", deleteTransactionMetadata) + router.Get("/transactions/{id}", getTransaction) + router.Post("/transactions/{id}/revert", revertTransaction) + router.Post("/transactions/{id}/metadata", postTransactionMetadata) + router.Delete("/transactions/{id}/metadata/{key}", 
deleteTransactionMetadata) - // TODO: Rename to /aggregatedBalances - router.Get("/aggregate/balances", getBalancesAggregated) + router.Get("/aggregate/balances", getBalancesAggregated) + }) }) }) diff --git a/internal/engine/ledger.go b/internal/engine/ledger.go index 92f45ed25..d64ee89c7 100644 --- a/internal/engine/ledger.go +++ b/internal/engine/ledger.go @@ -122,7 +122,7 @@ func (l *Ledger) IsDatabaseUpToDate(ctx context.Context) (bool, error) { return true, nil } var err error - l.isSchemaUpToDate, err = l.store.IsSchemaUpToDate(ctx) + l.isSchemaUpToDate, err = l.store.IsUpToDate(ctx) return l.isSchemaUpToDate, err } diff --git a/internal/engine/resolver.go b/internal/engine/resolver.go index 70cfc1fbe..4de8269bb 100644 --- a/internal/engine/resolver.go +++ b/internal/engine/resolver.go @@ -4,6 +4,8 @@ import ( "context" "sync" + "github.com/formancehq/ledger/internal/storage/ledgerstore" + "github.com/ThreeDotsLabs/watermill/message" "github.com/formancehq/ledger/internal/engine/command" "github.com/formancehq/ledger/internal/opentelemetry/metrics" @@ -67,6 +69,15 @@ func NewResolver(storageDriver *driver.Driver, options ...option) *Resolver { return r } +func (r *Resolver) startLedgerUsingStore(ctx context.Context, name string, store *ledgerstore.Store) *Ledger { + ledger := New(store, r.publisher, r.compiler) + ledger.Start(logging.ContextWithLogger(context.Background(), r.logger)) + r.ledgers[name] = ledger + r.metricsRegistry.ActiveLedgers().Add(ctx, +1) + + return ledger +} + func (r *Resolver) GetLedger(ctx context.Context, name string) (*Ledger, error) { r.lock.RLock() ledger, ok := r.ledgers[name] @@ -76,8 +87,6 @@ func (r *Resolver) GetLedger(ctx context.Context, name string) (*Ledger, error) r.lock.Lock() defer r.lock.Unlock() - logging.FromContext(ctx).Infof("Initialize new ledger") - ledger, ok = r.ledgers[name] if ok { return ledger, nil @@ -88,15 +97,24 @@ func (r *Resolver) GetLedger(ctx context.Context, name string) (*Ledger, error) return 
nil, err } - ledger = New(store, r.publisher, r.compiler) - ledger.Start(logging.ContextWithLogger(context.Background(), r.logger)) - r.ledgers[name] = ledger - r.metricsRegistry.ActiveLedgers().Add(ctx, +1) + ledger = r.startLedgerUsingStore(ctx, name, store) } return ledger, nil } +func (r *Resolver) CreateLedger(ctx context.Context, name string, configuration driver.LedgerConfiguration) (*Ledger, error) { + r.lock.Lock() + defer r.lock.Unlock() + + store, err := r.storageDriver.CreateLedgerStore(ctx, name, configuration) + if err != nil { + return nil, err + } + + return r.startLedgerUsingStore(ctx, name, store), nil +} + func (r *Resolver) CloseLedgers(ctx context.Context) error { r.logger.Info("Close all ledgers") defer func() { diff --git a/internal/storage/driver/driver.go b/internal/storage/driver/driver.go index 9ec858829..ccc38ea7f 100644 --- a/internal/storage/driver/driver.go +++ b/internal/storage/driver/driver.go @@ -2,99 +2,141 @@ package driver import ( "context" - "fmt" + "database/sql" "sync" - "github.com/formancehq/ledger/internal/storage/sqlutils" + "github.com/formancehq/ledger/internal/storage/paginate" + "github.com/formancehq/stack/libs/go-libs/api" + + ledger "github.com/formancehq/ledger/internal" + "github.com/formancehq/stack/libs/go-libs/collectionutils" + "github.com/pkg/errors" + "github.com/uptrace/bun" "github.com/formancehq/ledger/internal/storage/ledgerstore" + + "github.com/formancehq/ledger/internal/storage/sqlutils" + "github.com/formancehq/ledger/internal/storage/systemstore" "github.com/formancehq/stack/libs/go-libs/logging" - "github.com/pkg/errors" - "github.com/uptrace/bun" ) -const SystemSchema = "_system" +const defaultBucket = "_default" + +var ( + ErrNeedUpgradeBucket = errors.New("need to upgrade bucket before add a new ledger on it") + ErrLedgerAlreadyExists = errors.New("ledger already exists") +) + +type LedgerConfiguration struct { + Bucket string `json:"bucket"` +} type Driver struct { systemStore 
*systemstore.Store lock sync.Mutex connectionOptions sqlutils.ConnectionOptions + buckets map[string]*ledgerstore.Bucket db *bun.DB - databasesBySchema map[string]*bun.DB } func (d *Driver) GetSystemStore() *systemstore.Store { return d.systemStore } -func (d *Driver) newLedgerStore(name string) (*ledgerstore.Store, error) { - db, err := sqlutils.OpenDBWithSchema(d.connectionOptions, name) +func (d *Driver) OpenBucket(name string) (*ledgerstore.Bucket, error) { + + bucket, ok := d.buckets[name] + if ok { + return bucket, nil + } + + b, err := ledgerstore.ConnectToBucket(d.connectionOptions, name) if err != nil { return nil, err } - d.databasesBySchema[name] = db + d.buckets[name] = b - return ledgerstore.New(db, name, func(ctx context.Context) error { - return d.GetSystemStore().DeleteLedger(ctx, name) - }) + return b, nil } -func (d *Driver) createLedgerStore(ctx context.Context, name string) (*ledgerstore.Store, error) { - if name == SystemSchema { - return nil, errors.New("reserved name") - } +func (d *Driver) GetLedgerStore(ctx context.Context, name string) (*ledgerstore.Store, error) { + d.lock.Lock() + defer d.lock.Unlock() - exists, err := d.systemStore.Exists(ctx, name) + ledgerConfiguration, err := d.systemStore.GetLedger(ctx, name) if err != nil { return nil, err } - if exists { - return nil, sqlutils.ErrStoreAlreadyExists - } - _, err = d.systemStore.Register(ctx, name) + bucket, err := d.OpenBucket(ledgerConfiguration.Bucket) if err != nil { return nil, err } - store, err := d.newLedgerStore(name) + return bucket.GetLedgerStore(name) +} + +func (f *Driver) CreateLedgerStore(ctx context.Context, name string, configuration LedgerConfiguration) (*ledgerstore.Store, error) { + + tx, err := f.db.BeginTx(ctx, &sql.TxOptions{}) if err != nil { return nil, err } + defer func() { + _ = tx.Rollback() + }() - _, err = store.Migrate(ctx) - - return store, err -} - -func (d *Driver) CreateLedgerStore(ctx context.Context, name string) (*ledgerstore.Store, error) { - 
d.lock.Lock() - defer d.lock.Unlock() + if _, err := f.systemStore.GetLedger(ctx, name); err == nil { + return nil, ErrLedgerAlreadyExists + } else if !sqlutils.IsNotFoundError(err) { + return nil, err + } - return d.createLedgerStore(ctx, name) -} + bucketName := defaultBucket + if configuration.Bucket != "" { + bucketName = configuration.Bucket + } -func (d *Driver) GetLedgerStore(ctx context.Context, name string) (*ledgerstore.Store, error) { - d.lock.Lock() - defer d.lock.Unlock() + bucket, err := f.OpenBucket(bucketName) + if err != nil { + return nil, errors.Wrap(err, "opening bucket") + } - exists, err := d.systemStore.Exists(ctx, name) + isInitialized, err := bucket.IsInitialized(ctx) if err != nil { - return nil, err + return nil, errors.Wrap(err, "checking if bucket is initialized") } - var store *ledgerstore.Store - if !exists { - store, err = d.createLedgerStore(ctx, name) + if isInitialized { + isUpToDate, err := bucket.IsUpToDate(ctx) + if err != nil { + return nil, errors.Wrap(err, "checking if bucket is up to date") + } + if !isUpToDate { + return nil, ErrNeedUpgradeBucket + } } else { - store, err = d.newLedgerStore(name) + if err := ledgerstore.MigrateBucket(ctx, tx, bucketName); err != nil { + return nil, errors.Wrap(err, "migrating bucket") + } } + + store, err := bucket.GetLedgerStore(name) if err != nil { - return nil, err + return nil, errors.Wrap(err, "getting ledger store") } - return store, nil + _, err = systemstore.RegisterLedger(ctx, tx, &systemstore.Ledger{ + Name: name, + AddedAt: ledger.Now(), + Bucket: bucketName, + }) + if err != nil { + return nil, errors.Wrap(err, "registring ledger on system store") + } + + return store, errors.Wrap(tx.Commit(), "committing sql transaction") } func (d *Driver) Initialize(ctx context.Context) error { @@ -103,43 +145,48 @@ func (d *Driver) Initialize(ctx context.Context) error { var err error d.db, err = sqlutils.OpenSQLDB(d.connectionOptions) if err != nil { - return sqlutils.PostgresError(err) + 
return errors.Wrap(err, "connecting to database") } - _, err = d.db.ExecContext(ctx, fmt.Sprintf(`create schema if not exists "%s"`, SystemSchema)) - if err != nil { - return sqlutils.PostgresError(err) + if err := systemstore.Migrate(ctx, d.db); err != nil { + return errors.Wrap(err, "migrating data") } - dbWithSystemSchema, err := sqlutils.OpenDBWithSchema(d.connectionOptions, SystemSchema) + d.systemStore, err = systemstore.Connect(ctx, d.connectionOptions) if err != nil { - return sqlutils.PostgresError(err) - } - - d.systemStore = systemstore.NewStore(dbWithSystemSchema) - - if err := d.systemStore.Initialize(ctx); err != nil { - return err + return errors.Wrap(err, "connecting to system store") } return nil } -func (d *Driver) UpgradeAllLedgersSchemas(ctx context.Context) error { +func (d *Driver) UpgradeAllBuckets(ctx context.Context) error { + systemStore := d.GetSystemStore() - ledgers, err := systemStore.ListLedgers(ctx) + + buckets := collectionutils.Set[string]{} + err := paginate.Iterate(ctx, systemstore.NewListLedgersQuery(10), + func(ctx context.Context, q systemstore.ListLedgersQuery) (*api.Cursor[systemstore.Ledger], error) { + return systemStore.ListLedgers(ctx, q) + }, + func(cursor *api.Cursor[systemstore.Ledger]) error { + for _, name := range cursor.Data { + buckets.Put(name.Name) + } + return nil + }) if err != nil { return err } - for _, ledger := range ledgers { - store, err := d.GetLedgerStore(ctx, ledger) + for _, bucket := range collectionutils.Keys(buckets) { + bucket, err := d.OpenBucket(bucket) if err != nil { return err } - logging.FromContext(ctx).Infof("Upgrading storage '%s'", ledger) - if _, err := store.Migrate(ctx); err != nil { + logging.FromContext(ctx).Infof("Upgrading bucket '%s'", bucket) + if err := bucket.Migrate(ctx); err != nil { return err } } @@ -148,20 +195,16 @@ func (d *Driver) UpgradeAllLedgersSchemas(ctx context.Context) error { } func (d *Driver) Close() error { - if d.systemStore != nil { - if err := 
d.systemStore.Close(); err != nil { - return err - } + if err := d.systemStore.Close(); err != nil { + return err } - for _, db := range d.databasesBySchema { - if err := db.Close(); err != nil { + for _, b := range d.buckets { + if err := b.Close(); err != nil { return err } } - if d.db != nil { - if err := d.db.Close(); err != nil { - return err - } + if err := d.db.Close(); err != nil { + return err } return nil } @@ -169,6 +212,6 @@ func (d *Driver) Close() error { func New(connectionOptions sqlutils.ConnectionOptions) *Driver { return &Driver{ connectionOptions: connectionOptions, - databasesBySchema: make(map[string]*bun.DB), + buckets: make(map[string]*ledgerstore.Bucket), } } diff --git a/internal/storage/driver/driver_test.go b/internal/storage/driver/driver_test.go index 9dce29fc5..3ec9a572d 100644 --- a/internal/storage/driver/driver_test.go +++ b/internal/storage/driver/driver_test.go @@ -1,9 +1,11 @@ package driver_test import ( - "context" + "fmt" "testing" + "github.com/formancehq/ledger/internal/storage/driver" + "github.com/formancehq/ledger/internal/storage/sqlutils" "github.com/formancehq/stack/libs/go-libs/logging" @@ -14,44 +16,88 @@ import ( ) func TestConfiguration(t *testing.T) { + t.Parallel() + d := storagetesting.StorageDriver(t) - defer func() { - _ = d.Close() - }() + ctx := logging.TestingContext() - require.NoError(t, d.GetSystemStore().InsertConfiguration(context.Background(), "foo", "bar")) - bar, err := d.GetSystemStore().GetConfiguration(context.Background(), "foo") + require.NoError(t, d.GetSystemStore().InsertConfiguration(ctx, "foo", "bar")) + bar, err := d.GetSystemStore().GetConfiguration(ctx, "foo") require.NoError(t, err) require.Equal(t, "bar", bar) } func TestConfigurationError(t *testing.T) { + t.Parallel() + d := storagetesting.StorageDriver(t) - defer func() { - _ = d.Close() - }() + ctx := logging.TestingContext() - _, err := d.GetSystemStore().GetConfiguration(context.Background(), "not_existing") + _, err := 
d.GetSystemStore().GetConfiguration(ctx, "not_existing") require.Error(t, err) require.True(t, sqlutils.IsNotFoundError(err)) } -func TestErrorOnOutdatedSchema(t *testing.T) { - d := storagetesting.StorageDriver(t) - defer func() { - require.NoError(t, d.Close()) - }() +func TestErrorOnOutdatedBucket(t *testing.T) { + t.Parallel() ctx := logging.TestingContext() + d := storagetesting.StorageDriver(t) name := uuid.NewString() - _, err := d.GetSystemStore().Register(ctx, name) - require.NoError(t, err) - store, err := d.GetLedgerStore(ctx, name) + b, err := d.OpenBucket(name) require.NoError(t, err) + t.Cleanup(func() { + _ = b.Close() + }) - upToDate, err := store.IsSchemaUpToDate(ctx) + upToDate, err := b.IsUpToDate(ctx) require.NoError(t, err) require.False(t, upToDate) } + +func TestGetLedgerFromDefaultBucket(t *testing.T) { + t.Parallel() + + d := storagetesting.StorageDriver(t) + ctx := logging.TestingContext() + + name := uuid.NewString() + _, err := d.CreateLedgerStore(ctx, name, driver.LedgerConfiguration{}) + require.NoError(t, err) +} + +func TestGetLedgerFromAlternateBucket(t *testing.T) { + t.Parallel() + + d := storagetesting.StorageDriver(t) + ctx := logging.TestingContext() + + ledgerName := "ledger0" + bucketName := "bucket0" + + _, err := d.CreateLedgerStore(ctx, ledgerName, driver.LedgerConfiguration{ + Bucket: bucketName, + }) + require.NoError(t, err) +} + +func TestUpgradeAllBuckets(t *testing.T) { + t.Parallel() + + d := storagetesting.StorageDriver(t) + ctx := logging.TestingContext() + + count := 30 + + for i := 0; i < count; i++ { + name := fmt.Sprintf("ledger%d", i) + _, err := d.CreateLedgerStore(ctx, name, driver.LedgerConfiguration{ + Bucket: name, + }) + require.NoError(t, err) + } + + require.NoError(t, d.UpgradeAllBuckets(ctx)) +} diff --git a/internal/storage/ledgerstore/accounts.go b/internal/storage/ledgerstore/accounts.go index a688849c8..2f7641a9b 100644 --- a/internal/storage/ledgerstore/accounts.go +++ 
b/internal/storage/ledgerstore/accounts.go @@ -20,6 +20,7 @@ func (store *Store) buildAccountQuery(q PITFilterWithVolumes, query *bun.SelectQ query = query. Column("accounts.address"). Table("accounts"). + Where("accounts.ledger = ?", store.name). Apply(filterPIT(q.PIT, "insertion_date")). Order("accounts.address") @@ -27,7 +28,7 @@ func (store *Store) buildAccountQuery(q PITFilterWithVolumes, query *bun.SelectQ query = query. Column("accounts.address"). ColumnExpr("accounts_metadata.metadata"). - Join("left join accounts_metadata on accounts_metadata.address = accounts.address and accounts_metadata.date < ?", q.PIT). + Join("left join accounts_metadata on accounts_metadata.accounts_seq = accounts.seq and accounts_metadata.date < ?", q.PIT). Order("revision desc") } else { query = query.Column("metadata") @@ -36,13 +37,13 @@ func (store *Store) buildAccountQuery(q PITFilterWithVolumes, query *bun.SelectQ if q.ExpandVolumes { query = query. ColumnExpr("volumes.*"). - Join("join get_account_aggregated_volumes(accounts.address, ?) volumes on true", q.PIT) + Join("join get_account_aggregated_volumes(?, accounts.address, ?) volumes on true", store.name, q.PIT) } if q.ExpandEffectiveVolumes { query = query. ColumnExpr("effective_volumes.*"). - Join("join get_account_aggregated_effective_volumes(accounts.address, ?) effective_volumes on true", q.PIT) + Join("join get_account_aggregated_effective_volumes(?, accounts.address, ?) effective_volumes on true", store.name, q.PIT) } return query @@ -85,18 +86,18 @@ func (store *Store) accountQueryContext(qb query.Builder, q GetAccountsQuery) (s return `( select balance_from_volumes(post_commit_volumes) from moves - where asset = ? and account_address = accounts.address + where asset = ? and account_address = accounts.address and ledger = ? 
order by seq desc limit 1 - ) < ?`, []any{match[0][1], value}, nil + ) < ?`, []any{match[0][1], store.name, value}, nil case key == "balance": return `( select balance_from_volumes(post_commit_volumes) from moves - where account_address = accounts.address + where account_address = accounts.address and ledger = ? order by seq desc limit 1 - ) < ?`, nil, nil + ) < ?`, []any{store.name, value}, nil default: return "", nil, newErrInvalidQuery("unknown key '%s' when building query", key) } @@ -143,6 +144,7 @@ func (store *Store) GetAccount(ctx context.Context, address string) (*ledger.Acc Table("accounts"). Join("left join accounts_metadata on accounts_metadata.address = accounts.address"). Where("accounts.address = ?", address). + Where("accounts.ledger = ?", store.name). Order("revision desc"). Limit(1) }) diff --git a/internal/storage/ledgerstore/accounts_test.go b/internal/storage/ledgerstore/accounts_test.go index 0fd707bcc..5c3cac918 100644 --- a/internal/storage/ledgerstore/accounts_test.go +++ b/internal/storage/ledgerstore/accounts_test.go @@ -56,14 +56,14 @@ func TestGetAccounts(t *testing.T) { t.Run("list all", func(t *testing.T) { t.Parallel() - accounts, err := store.GetAccountsWithVolumes(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}))) + accounts, err := store.GetAccountsWithVolumes(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}))) require.NoError(t, err) require.Len(t, accounts.Data, 7) }) t.Run("list using metadata", func(t *testing.T) { t.Parallel() - accounts, err := store.GetAccountsWithVolumes(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}). + accounts, err := store.GetAccountsWithVolumes(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}). 
WithQueryBuilder(query.Match("metadata[category]", "1")), )) require.NoError(t, err) @@ -72,7 +72,7 @@ func TestGetAccounts(t *testing.T) { t.Run("list before date", func(t *testing.T) { t.Parallel() - accounts, err := store.GetAccountsWithVolumes(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{ + accounts, err := store.GetAccountsWithVolumes(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{ PITFilter: PITFilter{ PIT: &now, }, @@ -83,7 +83,7 @@ func TestGetAccounts(t *testing.T) { t.Run("list with volumes", func(t *testing.T) { t.Parallel() - accounts, err := store.GetAccountsWithVolumes(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{ + accounts, err := store.GetAccountsWithVolumes(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{ ExpandVolumes: true, }).WithQueryBuilder(query.Match("address", "account:1")))) require.NoError(t, err) @@ -95,7 +95,7 @@ func TestGetAccounts(t *testing.T) { t.Run("list with volumes using PIT", func(t *testing.T) { t.Parallel() - accounts, err := store.GetAccountsWithVolumes(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{ + accounts, err := store.GetAccountsWithVolumes(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{ PITFilter: PITFilter{ PIT: &now, }, @@ -110,7 +110,7 @@ func TestGetAccounts(t *testing.T) { t.Run("list with effective volumes", func(t *testing.T) { t.Parallel() - accounts, err := store.GetAccountsWithVolumes(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{ + accounts, err := store.GetAccountsWithVolumes(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{ ExpandEffectiveVolumes: true, }).WithQueryBuilder(query.Match("address", "account:1")))) require.NoError(t, err) @@ -137,7 +137,7 @@ func TestGetAccounts(t *testing.T) { t.Run("list using filter on address", func(t 
*testing.T) { t.Parallel() - accounts, err := store.GetAccountsWithVolumes(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}). + accounts, err := store.GetAccountsWithVolumes(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}). WithQueryBuilder(query.Match("address", "account:")), )) require.NoError(t, err) @@ -145,7 +145,7 @@ func TestGetAccounts(t *testing.T) { }) t.Run("list using filter on multiple address", func(t *testing.T) { t.Parallel() - accounts, err := store.GetAccountsWithVolumes(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}). + accounts, err := store.GetAccountsWithVolumes(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}). WithQueryBuilder( query.Or( query.Match("address", "account:1"), @@ -158,7 +158,7 @@ func TestGetAccounts(t *testing.T) { }) t.Run("list using filter on balances", func(t *testing.T) { t.Parallel() - accounts, err := store.GetAccountsWithVolumes(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}). + accounts, err := store.GetAccountsWithVolumes(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}). 
WithQueryBuilder(query.Lt("balance[USD]", 0)), )) require.NoError(t, err) @@ -320,16 +320,17 @@ func TestGetAccount(t *testing.T) { func TestGetAccountWithVolumes(t *testing.T) { t.Parallel() store := newLedgerStore(t) + ctx := logging.TestingContext() bigInt, _ := big.NewInt(0).SetString("999999999999999999999999999999999999999999999999999999999999999999999999999999999999999", 10) - require.NoError(t, insertTransactions(context.Background(), store, + require.NoError(t, insertTransactions(ctx, store, *ledger.NewTransaction().WithPostings( ledger.NewPosting("world", "multi", "USD/2", bigInt), ), )) - accountWithVolumes, err := store.GetAccountWithVolumes(context.Background(), + accountWithVolumes, err := store.GetAccountWithVolumes(ctx, NewGetAccountQuery("multi").WithExpandVolumes()) require.NoError(t, err) require.Equal(t, &ledger.ExpandedAccount{ @@ -346,14 +347,15 @@ func TestGetAccountWithVolumes(t *testing.T) { func TestUpdateAccountMetadata(t *testing.T) { t.Parallel() store := newLedgerStore(t) + ctx := logging.TestingContext() - require.NoError(t, store.InsertLogs(context.Background(), + require.NoError(t, store.InsertLogs(ctx, ledger.NewSetMetadataOnAccountLog(ledger.Now(), "central_bank", metadata.Metadata{ "foo": "bar", }).ChainLog(nil), )) - account, err := store.GetAccountWithVolumes(context.Background(), NewGetAccountQuery("central_bank")) + account, err := store.GetAccountWithVolumes(ctx, NewGetAccountQuery("central_bank")) require.NoError(t, err) require.EqualValues(t, "bar", account.Metadata["foo"]) } @@ -361,14 +363,15 @@ func TestUpdateAccountMetadata(t *testing.T) { func TestCountAccounts(t *testing.T) { t.Parallel() store := newLedgerStore(t) + ctx := logging.TestingContext() - require.NoError(t, insertTransactions(context.Background(), store, + require.NoError(t, insertTransactions(ctx, store, *ledger.NewTransaction().WithPostings( ledger.NewPosting("world", "central_bank", "USD/2", big.NewInt(100)), ), )) - countAccounts, err := 
store.CountAccounts(context.Background(), NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}))) + countAccounts, err := store.CountAccounts(ctx, NewGetAccountsQuery(NewPaginatedQueryOptions(PITFilterWithVolumes{}))) require.NoError(t, err) require.EqualValues(t, 2, countAccounts) // world + central_bank } diff --git a/internal/storage/ledgerstore/balances.go b/internal/storage/ledgerstore/balances.go index ed93b803b..2132d085e 100644 --- a/internal/storage/ledgerstore/balances.go +++ b/internal/storage/ledgerstore/balances.go @@ -65,11 +65,12 @@ func (store *Store) GetAggregatedBalances(ctx context.Context, q GetAggregatedBa } ret, err := fetch[*Temp](store, ctx, func(selectQuery *bun.SelectQuery) *bun.SelectQuery { - moves := store.db. + moves := store.bucket.db. NewSelect(). Table(MovesTableName). ColumnExpr("distinct on (moves.account_address, moves.asset) moves.*"). Order("account_address", "asset", "moves.seq desc"). + Where("moves.ledger = ?", store.name). Apply(filterPIT(q.Options.Options.PIT, "insertion_date")) // todo(gfyrag): expose capability to use effective_date if needMetadata { @@ -77,7 +78,7 @@ func (store *Store) GetAggregatedBalances(ctx context.Context, q GetAggregatedBa moves = moves.Join(`join lateral ( select metadata from accounts_metadata am - where am.address = moves.account_address and (? is null or date <= ?) + where am.accounts_seq = moves.accounts_seq and (? is null or date <= ?) 
order by revision desc limit 1 ) am on true`, q.Options.Options.PIT, q.Options.Options.PIT) @@ -85,7 +86,7 @@ func (store *Store) GetAggregatedBalances(ctx context.Context, q GetAggregatedBa moves = moves.Join(`join lateral ( select metadata from accounts a - where a.address = moves.account_address + where a.seq = moves.accounts_seq ) accounts on true`) } } @@ -114,7 +115,7 @@ func (store *Store) GetBalance(ctx context.Context, address, asset string) (*big Balance *big.Int `bun:"balance,type:numeric"` } v, err := fetch[*Temp](store, ctx, func(query *bun.SelectQuery) *bun.SelectQuery { - return query.TableExpr("get_account_balance(?, ?) as balance", address, asset) + return query.TableExpr("get_account_balance(?, ?, ?) as balance", store.name, address, asset) }) if err != nil { return nil, err diff --git a/internal/storage/ledgerstore/bucket.go b/internal/storage/ledgerstore/bucket.go new file mode 100644 index 000000000..550955950 --- /dev/null +++ b/internal/storage/ledgerstore/bucket.go @@ -0,0 +1,147 @@ +package ledgerstore + +import ( + "context" + "database/sql" + _ "embed" + "fmt" + + "github.com/formancehq/ledger/internal/storage/sqlutils" + "github.com/formancehq/stack/libs/go-libs/migrations" + "github.com/pkg/errors" + "github.com/uptrace/bun" +) + +//go:embed migrations/0-init-schema.sql +var initSchema string + +type Bucket struct { + name string + db *bun.DB +} + +func (b *Bucket) Migrate(ctx context.Context) error { + return MigrateBucket(ctx, b.db, b.name) +} + +func (b *Bucket) GetMigrationsInfo(ctx context.Context) ([]migrations.Info, error) { + return getBucketMigrator(b.name).GetMigrations(ctx, b.db) +} + +func (b *Bucket) IsUpToDate(ctx context.Context) (bool, error) { + ret, err := getBucketMigrator(b.name).IsUpToDate(ctx, b.db) + if err != nil && errors.Is(err, migrations.ErrMissingVersionTable) { + return false, nil + } + return ret, err +} + +func (b *Bucket) Close() error { + return b.db.Close() +} + +func (b *Bucket) createLedgerStore(name 
string) (*Store, error) { + return New(b, name) +} + +func (b *Bucket) CreateLedgerStore(name string) (*Store, error) { + return b.createLedgerStore(name) +} + +func (b *Bucket) GetLedgerStore(name string) (*Store, error) { + return New(b, name) +} + +func (b *Bucket) IsInitialized(ctx context.Context) (bool, error) { + row := b.db.QueryRowContext(ctx, ` + select schema_name + from information_schema.schemata + where schema_name = ?; + `, b.name) + if row.Err() != nil { + return false, sqlutils.PostgresError(row.Err()) + } + var t string + if err := row.Scan(&t); errors.Is(err, sql.ErrNoRows) { + return false, nil + } else if err != nil { + return false, sqlutils.PostgresError(err) + } + return true, nil +} + +func registerMigrations(migrator *migrations.Migrator, name string) { + migrator.RegisterMigrations( + migrations.Migration{ + Name: "Init schema", + UpWithContext: func(ctx context.Context, tx bun.Tx) error { + + needV1Upgrade := false + row := tx.QueryRowContext(ctx, `select exists ( + select from pg_tables + where schemaname = ? 
and tablename = 'log' + )`, name) + if row.Err() != nil { + return row.Err() + } + var ret string + if err := row.Scan(&ret); err != nil { + return err + } + needV1Upgrade = ret != "false" + + oldSchemaRenamed := name + oldSchemaRenameSuffix + if needV1Upgrade { + _, err := tx.ExecContext(ctx, fmt.Sprintf(`alter schema "%s" rename to "%s"`, name, oldSchemaRenamed)) + if err != nil { + return errors.Wrap(err, "renaming old schema") + } + _, err = tx.ExecContext(ctx, fmt.Sprintf(`create schema if not exists "%s"`, name)) + if err != nil { + return errors.Wrap(err, "creating new schema") + } + } + + _, err := tx.ExecContext(ctx, initSchema) + if err != nil { + return errors.Wrap(err, "initializing new schema") + } + + if needV1Upgrade { + if err := migrateLogs(ctx, oldSchemaRenamed, name, tx); err != nil { + return errors.Wrap(err, "migrating logs") + } + + _, err = tx.ExecContext(ctx, fmt.Sprintf(`create table goose_db_version as table "%s".goose_db_version with no data`, oldSchemaRenamed)) + if err != nil { + return err + } + } + + return nil + }, + }, + ) +} + +func ConnectToBucket(connectionOptions sqlutils.ConnectionOptions, name string, hooks ...bun.QueryHook) (*Bucket, error) { + db, err := sqlutils.OpenDBWithSchema(connectionOptions, name, hooks...) 
+ if err != nil { + return nil, sqlutils.PostgresError(err) + } + + return &Bucket{ + db: db, + name: name, + }, nil +} + +func getBucketMigrator(name string) *migrations.Migrator { + migrator := migrations.NewMigrator(migrations.WithSchema(name, true)) + registerMigrations(migrator, name) + return migrator +} + +func MigrateBucket(ctx context.Context, db bun.IDB, name string) error { + return getBucketMigrator(name).Up(ctx, db) +} diff --git a/internal/storage/ledgerstore/bucket_test.go b/internal/storage/ledgerstore/bucket_test.go new file mode 100644 index 000000000..67bc9a85a --- /dev/null +++ b/internal/storage/ledgerstore/bucket_test.go @@ -0,0 +1,71 @@ +package ledgerstore + +import ( + "math/big" + "testing" + + ledger "github.com/formancehq/ledger/internal" + "github.com/formancehq/stack/libs/go-libs/logging" + "github.com/formancehq/stack/libs/go-libs/metadata" + "github.com/google/uuid" + "github.com/stretchr/testify/require" +) + +func TestBuckets(t *testing.T) { + ctx := logging.TestingContext() + bucket := newBucket(t) + var ( + ledger0 = uuid.NewString() + ledger1 = uuid.NewString() + ) + ledger0Store, err := bucket.CreateLedgerStore(ledger0) + require.NoError(t, err) + + ledger1Store, err := bucket.CreateLedgerStore(ledger1) + require.NoError(t, err) + + txLedger0 := ledger.Transaction{ + ID: big.NewInt(0), + TransactionData: ledger.TransactionData{ + Postings: ledger.Postings{ + { + Source: "world", + Destination: "alice", + Amount: big.NewInt(100), + Asset: "USD", + }, + }, + Metadata: metadata.Metadata{}, + }, + } + + txLedger1 := ledger.Transaction{ + ID: big.NewInt(0), + TransactionData: ledger.TransactionData{ + Postings: ledger.Postings{ + { + Source: "world", + Destination: "alice", + Amount: big.NewInt(100), + Asset: "USD", + }, + }, + Metadata: metadata.Metadata{}, + }, + } + + require.NoError(t, ledger0Store.InsertLogs(ctx, + ledger.NewTransactionLog(&txLedger0, map[string]metadata.Metadata{}).ChainLog(nil), + )) + require.NoError(t, 
ledger1Store.InsertLogs(ctx, + ledger.NewTransactionLog(&txLedger1, map[string]metadata.Metadata{}).ChainLog(nil), + )) + + count, err := ledger0Store.CountTransactions(ctx, NewGetTransactionsQuery(PaginatedQueryOptions[PITFilterWithVolumes]{})) + require.NoError(t, err) + require.Equal(t, 1, count) + + count, err = ledger1Store.CountTransactions(ctx, NewGetTransactionsQuery(PaginatedQueryOptions[PITFilterWithVolumes]{})) + require.NoError(t, err) + require.Equal(t, 1, count) +} diff --git a/internal/storage/ledgerstore/logs.go b/internal/storage/ledgerstore/logs.go index 5b79deacf..0a7df28d3 100644 --- a/internal/storage/ledgerstore/logs.go +++ b/internal/storage/ledgerstore/logs.go @@ -25,6 +25,7 @@ const ( type Logs struct { bun.BaseModel `bun:"logs,alias:logs"` + Ledger string `bun:"ledger,type:varchar"` ID *paginate.BigInt `bun:"id,unique,type:numeric"` Type string `bun:"type,type:log_type"` Hash []byte `bun:"hash,type:bytea"` @@ -88,9 +89,9 @@ func (store *Store) InsertLogs(ctx context.Context, activeLogs ...*ledger.Chaine return store.withTransaction(ctx, func(tx bun.Tx) error { // Beware: COPY query is not supported by bun if the pgx driver is used. 
stmt, err := tx.Prepare(pq.CopyInSchema( - store.name, + store.bucket.name, LogTableName, - "id", "type", "hash", "date", "data", "idempotency_key", + "ledger", "id", "type", "hash", "date", "data", "idempotency_key", )) if err != nil { return storageerrors.PostgresError(err) @@ -104,6 +105,7 @@ func (store *Store) InsertLogs(ctx context.Context, activeLogs ...*ledger.Chaine } ls[i] = Logs{ + Ledger: store.name, ID: (*paginate.BigInt)(chainedLogs.ID), Type: chainedLogs.Type.String(), Hash: chainedLogs.Hash, @@ -112,7 +114,7 @@ func (store *Store) InsertLogs(ctx context.Context, activeLogs ...*ledger.Chaine IdempotencyKey: chainedLogs.IdempotencyKey, } - _, err = stmt.Exec(ls[i].ID, ls[i].Type, ls[i].Hash, ls[i].Date, RawMessage(ls[i].Data), chainedLogs.IdempotencyKey) + _, err = stmt.Exec(ls[i].Ledger, ls[i].ID, ls[i].Type, ls[i].Hash, ls[i].Date, RawMessage(ls[i].Data), chainedLogs.IdempotencyKey) if err != nil { return storageerrors.PostgresError(err) } @@ -133,6 +135,7 @@ func (store *Store) GetLastLog(ctx context.Context) (*ledger.ChainedLog, error) return query. Table(LogTableName). OrderExpr("id desc"). + Where("ledger = ?", store.name). Limit(1) }) if err != nil { @@ -163,7 +166,8 @@ func (store *Store) ReadLogWithIdempotencyKey(ctx context.Context, key string) ( Table(LogTableName). OrderExpr("id desc"). Limit(1). - Where("idempotency_key = ?", key) + Where("idempotency_key = ?", key). 
+ Where("ledger = ?", store.name) }) if err != nil { return nil, err diff --git a/internal/storage/ledgerstore/main_test.go b/internal/storage/ledgerstore/main_test.go index ecc3c1373..0fc8d54d9 100644 --- a/internal/storage/ledgerstore/main_test.go +++ b/internal/storage/ledgerstore/main_test.go @@ -6,12 +6,14 @@ import ( "fmt" "os" "testing" + "time" - "github.com/formancehq/ledger/internal/storage/sqlutils" - "github.com/uptrace/bun" "github.com/uptrace/bun/dialect/pgdialect" + "github.com/uptrace/bun" + ledger "github.com/formancehq/ledger/internal" + "github.com/formancehq/ledger/internal/storage/sqlutils" "github.com/formancehq/stack/libs/go-libs/logging" "github.com/formancehq/stack/libs/go-libs/pgtesting" "github.com/google/uuid" @@ -49,31 +51,48 @@ type T interface { Cleanup(func()) } +func newBucket(t T, hooks ...bun.QueryHook) *Bucket { + name := uuid.NewString() + ctx := logging.TestingContext() + + pgDatabase := pgtesting.NewPostgresDatabase(t) + + connectionOptions := sqlutils.ConnectionOptions{ + DatabaseSourceName: pgDatabase.ConnString(), + Debug: testing.Verbose(), + MaxIdleConns: 40, + MaxOpenConns: 40, + ConnMaxIdleTime: time.Minute, + } + + bucket, err := ConnectToBucket(connectionOptions, name, hooks...) 
+ require.NoError(t, err) + t.Cleanup(func() { + _ = bucket.Close() + }) + + require.NoError(t, bucket.Migrate(ctx)) + + return bucket +} + func newLedgerStore(t T, hooks ...bun.QueryHook) *Store { t.Helper() ledgerName := uuid.NewString() + ctx := logging.TestingContext() - _, err := bunDB.Exec(fmt.Sprintf(`create schema if not exists "%s"`, ledgerName)) + _, err := bunDB.ExecContext(ctx, fmt.Sprintf(`create schema if not exists "%s"`, ledgerName)) require.NoError(t, err) t.Cleanup(func() { - _, err = bunDB.Exec(fmt.Sprintf(`drop schema "%s" cascade`, ledgerName)) + _, err = bunDB.ExecContext(ctx, fmt.Sprintf(`drop schema "%s" cascade`, ledgerName)) require.NoError(t, err) }) - ledgerDB, err := sqlutils.OpenDBWithSchema(sqlutils.ConnectionOptions{ - DatabaseSourceName: pgtesting.Server().GetDSN(), - Debug: testing.Verbose(), - }, ledgerName, hooks...) - require.NoError(t, err) - - store, err := New(ledgerDB, ledgerName, func(ctx context.Context) error { - return nil - }) - require.NoError(t, err) + bucket := newBucket(t, hooks...) 
- _, err = store.Migrate(logging.TestingContext()) + store, err := bucket.CreateLedgerStore(ledgerName) require.NoError(t, err) return store diff --git a/internal/storage/ledgerstore/migrations.go b/internal/storage/ledgerstore/migrations.go deleted file mode 100644 index c1d89cd4d..000000000 --- a/internal/storage/ledgerstore/migrations.go +++ /dev/null @@ -1,89 +0,0 @@ -package ledgerstore - -import ( - "context" - _ "embed" - "fmt" - - "github.com/formancehq/stack/libs/go-libs/migrations" - "github.com/pkg/errors" - "github.com/uptrace/bun" -) - -func (store *Store) getMigrator() *migrations.Migrator { - migrator := migrations.NewMigrator(migrations.WithSchema(store.Name(), true)) - registerMigrations(migrator, store.name) - return migrator -} - -func (store *Store) Migrate(ctx context.Context) (bool, error) { - migrator := store.getMigrator() - - if err := migrator.Up(ctx, store.db); err != nil { - return false, err - } - - // TODO: Update migrations package to return modifications - return false, nil -} - -func (store *Store) GetMigrationsInfo(ctx context.Context) ([]migrations.Info, error) { - return store.getMigrator().GetMigrations(ctx, store.db) -} - -//go:embed migrations/0-init-schema.sql -var initSchema string - -func registerMigrations(migrator *migrations.Migrator, name string) { - migrator.RegisterMigrations( - migrations.Migration{ - Name: "Init schema", - UpWithContext: func(ctx context.Context, tx bun.Tx) error { - - needV1Upgrade := false - row := tx.QueryRowContext(ctx, `select exists ( - select from pg_tables - where schemaname = ? 
and tablename = 'log' - )`, name) - if row.Err() != nil { - return row.Err() - } - var ret string - if err := row.Scan(&ret); err != nil { - panic(err) - } - needV1Upgrade = ret != "false" - - oldSchemaRenamed := fmt.Sprintf(name + oldSchemaRenameSuffix) - if needV1Upgrade { - _, err := tx.ExecContext(ctx, fmt.Sprintf(`alter schema "%s" rename to "%s"`, name, oldSchemaRenamed)) - if err != nil { - return errors.Wrap(err, "renaming old schema") - } - _, err = tx.ExecContext(ctx, fmt.Sprintf(`create schema if not exists "%s"`, name)) - if err != nil { - return errors.Wrap(err, "creating new schema") - } - } - - _, err := tx.ExecContext(ctx, initSchema) - if err != nil { - return errors.Wrap(err, "initializing new schema") - } - - if needV1Upgrade { - if err := migrateLogs(ctx, oldSchemaRenamed, name, tx); err != nil { - return errors.Wrap(err, "migrating logs") - } - - _, err = tx.ExecContext(ctx, fmt.Sprintf(`create table goose_db_version as table "%s".goose_db_version with no data`, oldSchemaRenamed)) - if err != nil { - return err - } - } - - return nil - }, - }, - ) -} diff --git a/internal/storage/ledgerstore/migrations/0-init-schema.sql b/internal/storage/ledgerstore/migrations/0-init-schema.sql index ea3e62d4e..68666b052 100644 --- a/internal/storage/ledgerstore/migrations/0-init-schema.sql +++ b/internal/storage/ledgerstore/migrations/0-init-schema.sql @@ -1,25 +1,23 @@ -/** - Some utils - */ create aggregate aggregate_objects(jsonb) ( sfunc = jsonb_concat, stype = jsonb, initcond = '{}' ); -create function first_agg (anyelement, anyelement) +create function first_agg(anyelement, anyelement) returns anyelement language sql immutable strict parallel safe -as $$ +as +$$ select $1 $$; create aggregate first (anyelement) ( - sfunc = first_agg, - stype = anyelement, + sfunc = first_agg, + stype = anyelement, parallel = safe ); @@ -27,7 +25,8 @@ create function array_distinct(anyarray) returns anyarray language sql immutable -as $$ +as +$$ select array_agg(distinct 
x) from unnest($1) t(x); $$; @@ -35,84 +34,123 @@ $$; /** Define types **/ create type account_with_volumes as ( - address varchar, + address varchar, metadata jsonb, - volumes jsonb + volumes jsonb ); create type volumes as ( - inputs numeric, + inputs numeric, outputs numeric ); create type volumes_with_asset as ( - asset varchar, + asset varchar, volumes volumes ); /** Define tables **/ create table transactions ( - id numeric not null primary key, - timestamp timestamp without time zone not null, - reference varchar, - reverted_at timestamp without time zone, - updated_at timestamp without time zone, - postings varchar not null, - sources jsonb, - destinations jsonb, - sources_arrays jsonb, + seq bigserial primary key, + ledger varchar not null, + id numeric not null, + timestamp timestamp without time zone not null, + reference varchar, + reverted_at timestamp without time zone, + updated_at timestamp without time zone, + postings varchar not null, + sources jsonb, + destinations jsonb, + sources_arrays jsonb, destinations_arrays jsonb, - metadata jsonb not null default '{}'::jsonb + metadata jsonb not null default '{}'::jsonb ); +create unique index transactions_ledger on transactions (ledger, id); +create index transactions_date on transactions (timestamp); +create index transactions_metadata_index on transactions using gin (metadata jsonb_path_ops); +create index transactions_sources on transactions using gin (sources jsonb_path_ops); +create index transactions_destinations on transactions using gin (destinations jsonb_path_ops); +create index transactions_sources_arrays on transactions using gin (sources_arrays jsonb_path_ops); +create index transactions_destinations_arrays on transactions using gin (destinations_arrays jsonb_path_ops); + create table transactions_metadata ( - transaction_id numeric not null references transactions(id), - revision numeric default 0 not null, - date timestamp not null, - metadata jsonb not null default '{}'::jsonb, - - 
primary key (transaction_id, revision) + seq bigserial, + ledger varchar not null, + transactions_seq bigint references transactions (seq), + revision numeric default 0 not null, + date timestamp not null, + metadata jsonb not null default '{}'::jsonb, + + primary key (seq) ); +create index transactions_metadata_metadata on transactions_metadata using gin (metadata jsonb_path_ops); +create unique index transactions_metadata_ledger on transactions_metadata (ledger, transactions_seq, revision); +create index transactions_metadata_revisions on transactions_metadata(transactions_seq asc, revision desc) include (metadata, date); + create table accounts ( - address varchar primary key, - address_array jsonb not null, + seq bigserial primary key, + ledger varchar not null, + address varchar not null, + address_array jsonb not null, insertion_date timestamp not null, - updated_at timestamp not null, - metadata jsonb not null default '{}'::jsonb + updated_at timestamp not null, + metadata jsonb not null default '{}'::jsonb ); +create unique index accounts_ledger on accounts (ledger, address) include (seq); +create index accounts_address_array on accounts using gin (address_array jsonb_ops); +create index accounts_address_array_length on accounts (jsonb_array_length(address_array)); + create table accounts_metadata ( - address varchar references accounts(address), - metadata jsonb not null default '{}'::jsonb, - revision numeric default 0, - date timestamp + seq bigserial primary key, + ledger varchar not null, + accounts_seq bigint references accounts (seq), + metadata jsonb not null default '{}'::jsonb, + revision numeric default 0, + date timestamp ); +create unique index accounts_metadata_ledger on accounts_metadata (ledger, accounts_seq, revision); +create index accounts_metadata_metadata on accounts_metadata using gin (metadata jsonb_path_ops); +create index accounts_metadata_revisions on accounts_metadata(accounts_seq asc, revision desc) include (metadata, date); + 
create table moves ( - seq serial not null primary key , - transaction_id numeric not null references transactions(id), - account_address varchar not null, - account_address_array jsonb not null, - asset varchar not null, - amount numeric not null, - insertion_date timestamp not null, - effective_date timestamp not null, - post_commit_volumes volumes not null, + seq bigserial not null primary key, + ledger varchar not null, + transactions_seq bigint not null references transactions (seq), + accounts_seq bigint not null references accounts (seq), + account_address varchar not null, + account_address_array jsonb not null, + asset varchar not null, + amount numeric not null, + insertion_date timestamp not null, + effective_date timestamp not null, + post_commit_volumes volumes not null, post_commit_effective_volumes volumes default null, - is_source boolean not null + is_source boolean not null ); +create index moves_ledger on moves (ledger); +create index moves_range_dates on moves (account_address, asset, effective_date); +create index moves_account_address on moves (account_address); +create index moves_account_address_array on moves using gin (account_address_array jsonb_ops); +create index moves_account_address_array_length on moves (jsonb_array_length(account_address_array)); +create index moves_date on moves (effective_date); +create index moves_asset on moves (asset); +create index moves_post_commit_volumes on moves (accounts_seq, asset, seq); +create index moves_effective_post_commit_volumes on moves (accounts_seq, asset, effective_date desc); + create type log_type as enum - ( - 'NEW_TRANSACTION', + ('NEW_TRANSACTION', 'REVERTED_TRANSACTION', 'SET_METADATA', 'DELETE_METADATA' @@ -120,56 +158,29 @@ create type log_type as enum create table logs ( - id numeric not null primary key, - type log_type not null, - hash bytea not null, - date timestamp not null, - data jsonb not null, + seq bigserial primary key, + ledger varchar not null, + id numeric not null, + 
type log_type not null, + hash bytea not null, + date timestamp not null, + data jsonb not null, idempotency_key varchar(255) ); +create unique index logs_ledger on logs (ledger, id); + /** Define index **/ create function balance_from_volumes(v volumes) returns numeric language sql immutable -as $$ +as +$$ select v.inputs - v.outputs $$; -/** Index required for write part */ -create index moves_range_dates on moves (account_address, asset, effective_date); - -/** Index requires for read */ -create index transactions_date on transactions (timestamp); -create index transactions_metadata_index on transactions using gin (metadata jsonb_path_ops); -create index transactions_metadata_metadata on transactions_metadata using gin (metadata jsonb_path_ops); -create unique index transactions_metadata_revisions on transactions_metadata(transaction_id asc, revision desc) include (metadata, date); - ---create unique index transactions_revisions on transactions_metadata(id desc, revision desc); -create index transactions_sources on transactions using gin (sources jsonb_path_ops); -create index transactions_destinations on transactions using gin (destinations jsonb_path_ops); -create index transactions_sources_arrays on transactions using gin (sources_arrays jsonb_path_ops); -create index transactions_destinations_arrays on transactions using gin (destinations_arrays jsonb_path_ops); - -create index moves_account_address on moves (account_address); -create index moves_account_address_array on moves using gin (account_address_array jsonb_ops); -create index moves_account_address_array_length on moves (jsonb_array_length(account_address_array)); -create index moves_date on moves (effective_date); -create index moves_asset on moves (asset); -create index moves_balance on moves (balance_from_volumes(post_commit_volumes)); -create index moves_post_commit_volumes on moves(account_address, asset, seq); -create index moves_effective_post_commit_volumes on moves(account_address, asset, 
effective_date desc, seq desc); -create index moves_transactions_id on moves (transaction_id); - -create index accounts_address_array on accounts using gin (address_array jsonb_ops); -create index accounts_address_array_length on accounts (jsonb_array_length(address_array)); -create index accounts_metadata_index on accounts (address) include (metadata); -create index accounts_metadata_metadata on accounts_metadata using gin (metadata jsonb_path_ops); - -create unique index accounts_metadata_revisions on accounts_metadata(address asc, revision desc) include (metadata, date); - /** Define write functions **/ -- given the input : "a:b:c", the function will produce : '{"0": "a", "1": "b", "2": "c", "3": null}' @@ -177,39 +188,26 @@ create function explode_address(_address varchar) returns jsonb language sql immutable -as $$ +as +$$ select aggregate_objects(jsonb_build_object(data.number - 1, data.value)) -from ( - select row_number() over () as number, v.value - from ( - select unnest(string_to_array(_address, ':')) as value - union all - select null - ) v - ) data +from (select row_number() over () as number, v.value + from (select unnest(string_to_array(_address, ':')) as value + union all + select null) v) data $$; -create function get_account(_account_address varchar, _before timestamp default null) - returns setof accounts_metadata - language sql - stable -as $$ -select distinct on (address) * -from accounts_metadata t -where (_before is null or t.date <= _before) - and t.address = _account_address -order by address, revision desc -limit 1; -$$; - -create function get_transaction(_id numeric, _before timestamp default null) +create function get_transaction(_ledger varchar, _id numeric, _before timestamp default null) returns setof transactions language sql stable -as $$ +as +$$ select * from transactions t -where (_before is null or t.timestamp <= _before) and t.id = _id +where (_before is null or t.timestamp <= _before) + and t.id = _id + and ledger = _ledger 
order by id desc limit 1; $$; @@ -218,112 +216,134 @@ $$; -- but Postgres is extremely inefficient with distinct -- so the query implementation use a "hack" to emulate skip scan feature which Postgres lack natively -- see https://wiki.postgresql.org/wiki/Loose_indexscan for more information -create function get_all_assets() +create function get_all_assets(_ledger varchar) returns setof varchar language sql -as $$ -with recursive t as ( - select min(asset) as asset - from moves - union all - select - ( - select min(asset) - from moves - where asset > t.asset - ) - from t - where t.asset is not null -) -select asset from t where asset is not null +as +$$ +with recursive t as (select min(asset) as asset + from moves + where ledger = _ledger + union all + select (select min(asset) + from moves + where asset > t.asset + and ledger = _ledger) + from t + where t.asset is not null) +select asset +from t +where asset is not null union all -select null where exists(select 1 from moves where asset is null) +select null +where exists(select 1 from moves where asset is null and ledger = _ledger) $$; -create function get_latest_move_for_account_and_asset(_account_address varchar, _asset varchar, _before timestamp default null) +create function get_latest_move_for_account_and_asset(_ledger varchar, _account_address varchar, _asset varchar, + _before timestamp default null) returns setof moves language sql stable -as $$ +as +$$ select * from moves s -where (_before is null or s.effective_date <= _before) and s.account_address = _account_address and s.asset = _asset +where (_before is null or s.effective_date <= _before) + and s.account_address = _account_address + and s.asset = _asset + and ledger = _ledger order by effective_date desc, seq desc limit 1; $$; -create function upsert_account(_address varchar, _metadata jsonb, _date timestamp) - returns bool +create function upsert_account(_ledger varchar, _address varchar, _metadata jsonb, _date timestamp) + returns void language 
plpgsql -as $$ -declare - exists bool = false; +as +$$ begin - select true from accounts where address = _address into exists; - - insert into accounts(address, address_array, insertion_date, metadata, updated_at) - values (_address, to_json(string_to_array(_address, ':')), _date, coalesce(_metadata, '{}'::jsonb), _date) - on conflict (address) do update - set metadata = accounts.metadata || coalesce(_metadata, '{}'::jsonb), + insert into accounts(ledger, address, address_array, insertion_date, metadata, updated_at) + values (_ledger, _address, to_json(string_to_array(_address, ':')), _date, coalesce(_metadata, '{}'::jsonb), _date) + on conflict (ledger, address) do update + set metadata = accounts.metadata || coalesce(_metadata, '{}'::jsonb), updated_at = _date where not accounts.metadata @> coalesce(_metadata, '{}'::jsonb); - - return exists is null; end; $$; -create function delete_account_metadata(_address varchar, _key varchar, _date timestamp) +create function delete_account_metadata(_ledger varchar, _address varchar, _key varchar, _date timestamp) returns void language plpgsql -as $$ +as +$$ begin update accounts - set metadata = metadata - _key, updated_at = _date - where address = _address; + set metadata = metadata - _key, + updated_at = _date + where address = _address + and ledger = _ledger; end $$; -create function update_transaction_metadata(_id numeric, _metadata jsonb, _date timestamp) +create function update_transaction_metadata(_ledger varchar, _id numeric, _metadata jsonb, _date timestamp) returns void language plpgsql -as $$ +as +$$ begin update transactions - set metadata = metadata || _metadata, updated_at = _date - where id = _id; -- todo: add fill factor on transactions table ? + set metadata = metadata || _metadata, + updated_at = _date + where id = _id + and ledger = _ledger; -- todo: add fill factor on transactions table ? 
end; $$; -create function delete_transaction_metadata(_id numeric, _key varchar, _date timestamp) +create function delete_transaction_metadata(_ledger varchar, _id numeric, _key varchar, _date timestamp) returns void language plpgsql -as $$ +as +$$ begin update transactions - set metadata = metadata - _key, updated_at = _date - where id = _id; + set metadata = metadata - _key, + updated_at = _date + where id = _id + and ledger = _ledger; end; $$; -create function revert_transaction(_id numeric, _date timestamp) +create function revert_transaction(_ledger varchar, _id numeric, _date timestamp) returns void language sql -as $$ +as +$$ update transactions set reverted_at = _date -where id = _id; +where id = _id + and ledger = _ledger; $$; -create or replace function insert_move(_transaction_id numeric, _insertion_date timestamp without time zone, - _effective_date timestamp without time zone, _account_address varchar, _asset varchar, _amount numeric, _is_source bool, _new_account bool) +create or replace function insert_move( + _transactions_seq bigint, + _ledger varchar, + _insertion_date timestamp without time zone, + _effective_date timestamp without time zone, + _account_address varchar, + _asset varchar, + _amount numeric, + _is_source bool, + _account_exists bool) returns void language plpgsql -as $$ +as +$$ declare - _post_commit_volumes volumes = (0, 0)::volumes; + _post_commit_volumes volumes = (0, 0)::volumes; _effective_post_commit_volumes volumes = (0, 0)::volumes; - _seq numeric; + _seq bigint; + _account_seq bigint; begin -- todo: lock if we enable parallelism @@ -332,10 +352,13 @@ begin -- where address = _account_address -- for update; - if not _new_account then - select (post_commit_volumes).inputs, (post_commit_volumes).outputs into _post_commit_volumes + select seq from accounts where ledger = _ledger and address = _account_address into _account_seq; + + if _account_exists then + select (post_commit_volumes).inputs, (post_commit_volumes).outputs + 
into _post_commit_volumes from moves - where account_address = _account_address + where accounts_seq = _account_seq and asset = _asset order by seq desc limit 1; @@ -346,8 +369,9 @@ begin else select (post_commit_effective_volumes).inputs, (post_commit_effective_volumes).outputs into _effective_post_commit_volumes from moves - where account_address = _account_address - and asset = _asset and effective_date <= _effective_date + where accounts_seq = _account_seq + and asset = _asset + and effective_date <= _effective_date order by effective_date desc, seq desc limit 1; end if; @@ -361,108 +385,127 @@ begin _effective_post_commit_volumes.inputs = _effective_post_commit_volumes.inputs + _amount; end if; - insert into moves ( - insertion_date, - effective_date, - account_address, - asset, - transaction_id, - amount, - is_source, - account_address_array, - post_commit_volumes, - post_commit_effective_volumes - ) values (_insertion_date, _effective_date, _account_address, _asset, _transaction_id, - _amount, _is_source, (select to_json(string_to_array(_account_address, ':'))), - _post_commit_volumes, _effective_post_commit_volumes) + insert into moves (ledger, + insertion_date, + effective_date, + accounts_seq, + account_address, + asset, + transactions_seq, + amount, + is_source, + account_address_array, + post_commit_volumes, + post_commit_effective_volumes) + values (_ledger, + _insertion_date, + _effective_date, + _account_seq, + _account_address, + _asset, + _transactions_seq, + _amount, + _is_source, + (select to_json(string_to_array(_account_address, ':'))), + _post_commit_volumes, + _effective_post_commit_volumes) returning seq into _seq; - if not _new_account then + if _account_exists then update moves set post_commit_effective_volumes = - ( - (post_commit_effective_volumes).inputs + case when _is_source then 0 else _amount end, - (post_commit_effective_volumes).outputs + case when _is_source then _amount else 0 end + ((post_commit_effective_volumes).inputs + case 
when _is_source then 0 else _amount end, + (post_commit_effective_volumes).outputs + case when _is_source then _amount else 0 end ) - where account_address = _account_address and asset = _asset and effective_date > _effective_date; + where accounts_seq = _account_seq + and asset = _asset + and effective_date > _effective_date; update moves set post_commit_effective_volumes = - ( - (post_commit_effective_volumes).inputs + case when _is_source then 0 else _amount end, - (post_commit_effective_volumes).outputs + case when _is_source then _amount else 0 end + ((post_commit_effective_volumes).inputs + case when _is_source then 0 else _amount end, + (post_commit_effective_volumes).outputs + case when _is_source then _amount else 0 end ) - where account_address = _account_address and asset = _asset and effective_date = _effective_date and seq > _seq; + where accounts_seq = _account_seq + and asset = _asset + and effective_date = _effective_date + and seq > _seq; end if; end; $$; -create function insert_posting(_transaction_id numeric, _insertion_date timestamp without time zone, +create function insert_posting(_transaction_seq bigint, _ledger varchar, _insertion_date timestamp without time zone, _effective_date timestamp without time zone, posting jsonb, _account_metadata jsonb) returns void language plpgsql -as $$ +as +$$ declare - source_created bool; - destination_created bool; + _source_exists bool; + _destination_exists bool; begin - select upsert_account(posting->>'source', _account_metadata->(posting->>'source'), _insertion_date) into source_created; - select upsert_account(posting->>'destination', _account_metadata->(posting->>'destination'), _insertion_date) into destination_created; + + select true from accounts where ledger = _ledger and address = posting ->> 'source' into _source_exists; + select true from accounts where ledger = _ledger and address = posting ->> 'destination' into _destination_exists; + + perform upsert_account(_ledger, posting ->> 'source', 
_account_metadata -> (posting ->> 'source'), _insertion_date); + perform upsert_account(_ledger, posting ->> 'destination', _account_metadata -> (posting ->> 'destination'), + _insertion_date); -- todo: sometimes the balance is known at commit time (for sources != world), we need to forward the value to populate the pre_commit_aggregated_input and output - perform insert_move(_transaction_id, _insertion_date, _effective_date, - posting->>'source', posting->>'asset', (posting->>'amount')::numeric, true, source_created); - perform insert_move(_transaction_id, _insertion_date, _effective_date, - posting->>'destination', posting->>'asset', (posting->>'amount')::numeric, false, destination_created); + perform insert_move(_transaction_seq, _ledger, _insertion_date, _effective_date, + posting ->> 'source', posting ->> 'asset', (posting ->> 'amount')::numeric, true, + _source_exists); + perform insert_move(_transaction_seq, _ledger, _insertion_date, _effective_date, + posting ->> 'destination', posting ->> 'asset', (posting ->> 'amount')::numeric, false, + _destination_exists); end; $$; -- todo: maybe we could avoid plpgsql functions -create function insert_transaction(data jsonb, _date timestamp without time zone, _account_metadata jsonb) +create function insert_transaction(_ledger varchar, data jsonb, _date timestamp without time zone, + _account_metadata jsonb) returns void language plpgsql -as $$ +as +$$ declare posting jsonb; + _seq bigint; begin - insert into transactions (id, timestamp, updated_at, reference, postings, sources, + insert into transactions (ledger, id, timestamp, updated_at, reference, postings, sources, destinations, sources_arrays, destinations_arrays, metadata) - values ((data->>'id')::numeric, - (data->>'timestamp')::timestamp without time zone, - (data->>'timestamp')::timestamp without time zone, - data->>'reference', - jsonb_pretty(data->'postings'), - ( - select to_jsonb(array_agg(v->>'source')) as value - from 
jsonb_array_elements(data->'postings') v - ), - ( - select to_jsonb(array_agg(v->>'destination')) as value - from jsonb_array_elements(data->'postings') v - ), - ( - select to_jsonb(array_agg(explode_address(v->>'source'))) as value - from jsonb_array_elements(data->'postings') v - ), - ( - select to_jsonb(array_agg(explode_address(v->>'destination'))) as value - from jsonb_array_elements(data->'postings') v - ), - coalesce(data->'metadata', '{}'::jsonb) - ); - - for posting in (select jsonb_array_elements(data->'postings')) loop + values (_ledger, + (data ->> 'id')::numeric, + (data ->> 'timestamp')::timestamp without time zone, + (data ->> 'timestamp')::timestamp without time zone, + data ->> 'reference', + jsonb_pretty(data -> 'postings'), + (select to_jsonb(array_agg(v ->> 'source')) as value + from jsonb_array_elements(data -> 'postings') v), + (select to_jsonb(array_agg(v ->> 'destination')) as value + from jsonb_array_elements(data -> 'postings') v), + (select to_jsonb(array_agg(explode_address(v ->> 'source'))) as value + from jsonb_array_elements(data -> 'postings') v), + (select to_jsonb(array_agg(explode_address(v ->> 'destination'))) as value + from jsonb_array_elements(data -> 'postings') v), + coalesce(data -> 'metadata', '{}'::jsonb)) + returning seq into _seq; + + for posting in (select jsonb_array_elements(data -> 'postings')) + loop -- todo: sometimes the balance is known at commit time (for sources != world), we need to forward the value to populate the pre_commit_aggregated_input and output - perform insert_posting((data->>'id')::numeric, _date, (data->>'timestamp')::timestamp without time zone, posting, _account_metadata); + perform insert_posting(_seq, _ledger, _date, (data ->> 'timestamp')::timestamp without time zone, posting, + _account_metadata); end loop; - if data->'metadata' is not null and data->>'metadata' <> '()' then - insert into transactions_metadata (transaction_id, revision, date, metadata) values - ( - (data->>'id')::numeric, + 
if data -> 'metadata' is not null and data ->> 'metadata' <> '()' then + insert into transactions_metadata (ledger, transactions_seq, revision, date, metadata) + values (_ledger, + _seq, 0, - (data->>'timestamp')::timestamp without time zone, coalesce(data->'metadata', '{}'::jsonb) - ); + (data ->> 'timestamp')::timestamp without time zone, + coalesce(data -> 'metadata', '{}'::jsonb)); end if; end $$; @@ -470,27 +513,40 @@ $$; create function handle_log() returns trigger security definer language plpgsql -as $$ +as +$$ +declare + _key varchar; + _value jsonb; begin if new.type = 'NEW_TRANSACTION' then - perform insert_transaction(new.data->'transaction', new.date, new.data->'accountMetadata'); + perform insert_transaction(new.ledger, new.data -> 'transaction', new.date, new.data -> 'accountMetadata'); + for _key, _value in (select * from jsonb_each_text(new.data -> 'accountMetadata')) + loop + perform upsert_account(new.ledger, _key, _value, + (new.data -> 'transaction' ->> 'timestamp')::timestamp); + end loop; end if; if new.type = 'REVERTED_TRANSACTION' then - perform insert_transaction(new.data->'transaction', new.date, '{}'::jsonb); - perform revert_transaction((new.data->>'revertedTransactionID')::numeric, (new.data->'transaction'->>'timestamp')::timestamp); + perform insert_transaction(new.ledger, new.data -> 'transaction', new.date, '{}'::jsonb); + perform revert_transaction(new.ledger, (new.data ->> 'revertedTransactionID')::numeric, + (new.data -> 'transaction' ->> 'timestamp')::timestamp); end if; if new.type = 'SET_METADATA' then - if new.data->>'targetType' = 'TRANSACTION' then - perform update_transaction_metadata((new.data->>'targetId')::numeric, new.data->'metadata', new.date); + if new.data ->> 'targetType' = 'TRANSACTION' then + perform update_transaction_metadata(new.ledger, (new.data ->> 'targetId')::numeric, new.data -> 'metadata', + new.date); else - perform upsert_account((new.data->>'targetId')::varchar, new.data -> 'metadata', new.date); + 
perform upsert_account(new.ledger, (new.data ->> 'targetId')::varchar, new.data -> 'metadata', new.date); end if; end if; if new.type = 'DELETE_METADATA' then - if new.data->>'targetType' = 'TRANSACTION' then - perform delete_transaction_metadata((new.data->>'targetId')::numeric, new.data->>'key', new.date); + if new.data ->> 'targetType' = 'TRANSACTION' then + perform delete_transaction_metadata(new.ledger, (new.data ->> 'targetId')::numeric, new.data ->> 'key', + new.date); else - perform delete_account_metadata((new.data->>'targetId')::varchar, new.data ->>'key', new.date); + perform delete_account_metadata(new.ledger, (new.data ->> 'targetId')::varchar, new.data ->> 'key', + new.date); end if; end if; @@ -498,122 +554,110 @@ begin end; $$; -create trigger insert_log after insert on logs - for each row execute procedure handle_log(); - create function update_account_metadata_history() returns trigger security definer language plpgsql -as $$ +as +$$ begin - insert into accounts_metadata (address, revision, date, metadata) values (new.address, ( - select revision + 1 - from accounts_metadata - where accounts_metadata.address = new.address - order by revision desc - limit 1 - ), new.updated_at, new.metadata); + insert into accounts_metadata (ledger, accounts_seq, revision, date, metadata) + values (new.ledger, new.seq, (select revision + 1 + from accounts_metadata + where accounts_metadata.accounts_seq = new.seq + order by revision desc + limit 1), new.updated_at, new.metadata); return new; end; $$; -create trigger update_account after update on accounts - for each row execute procedure update_account_metadata_history(); - create function insert_account_metadata_history() returns trigger security definer language plpgsql -as $$ +as +$$ begin - insert into accounts_metadata (address, revision, date, metadata) values (new.address, 1, new.insertion_date, new.metadata); + insert into accounts_metadata (ledger, accounts_seq, revision, date, metadata) + values 
(new.ledger, new.seq, 1, new.insertion_date, new.metadata); return new; end; $$; -create trigger insert_account after insert on accounts - for each row execute procedure insert_account_metadata_history(); - create function update_transaction_metadata_history() returns trigger security definer language plpgsql -as $$ +as +$$ begin - insert into transactions_metadata (transaction_id, revision, date, metadata) values (new.id, ( - select revision + 1 - from transactions_metadata - where transactions_metadata.transaction_id = new.id - order by revision desc - limit 1 - ), new.updated_at, new.metadata); + insert into transactions_metadata (ledger, transactions_seq, revision, date, metadata) + values (new.ledger, new.seq, (select revision + 1 + from transactions_metadata + where transactions_metadata.transactions_seq = new.seq + order by revision desc + limit 1), new.updated_at, new.metadata); return new; end; $$; -create trigger update_transaction after update on transactions - for each row execute procedure update_transaction_metadata_history(); - create function insert_transaction_metadata_history() returns trigger security definer language plpgsql -as $$ +as +$$ begin - insert into transactions_metadata (transaction_id, revision, date, metadata) values (new.id, 1, new.timestamp, new.metadata); + insert into transactions_metadata (ledger, transactions_seq, revision, date, metadata) + values (new.ledger, new.seq, 1, new.timestamp, new.metadata); return new; end; $$; -create trigger insert_transaction after insert on transactions - for each row execute procedure insert_transaction_metadata_history(); - -create or replace function get_all_account_effective_volumes(_account varchar, _before timestamp default null) +create or replace function get_all_account_effective_volumes(_ledger varchar, _account varchar, _before timestamp default null) returns setof volumes_with_asset language sql stable -as $$ -with - all_assets as ( - select v.v as asset - from get_all_assets() v - 
), - moves as ( - select m.* - from all_assets assets - join lateral ( - select * - from moves s - where (_before is null or s.effective_date <= _before) and s.account_address = _account and s.asset = assets.asset - order by effective_date desc, seq desc - limit 1 - ) m on true - ) +as +$$ +with all_assets as (select v.v as asset + from get_all_assets(_ledger) v), + moves as (select m.* + from all_assets assets + join lateral ( + select * + from moves s + where (_before is null or s.effective_date <= _before) + and s.account_address = _account + and s.asset = assets.asset + and s.ledger = _ledger + order by effective_date desc, seq desc + limit 1 + ) m on true) select moves.asset, moves.post_commit_effective_volumes from moves $$; -create or replace function get_all_account_volumes(_account varchar, _before timestamp default null) +create or replace function get_all_account_volumes(_ledger varchar, _account varchar, _before timestamp default null) returns setof volumes_with_asset language sql stable -as $$ -with - all_assets as ( - select v.v as asset - from get_all_assets() v - ), - moves as ( - select m.* - from all_assets assets join lateral ( - select * - from moves s - where (_before is null or s.insertion_date <= _before) and s.account_address = _account and s.asset = assets.asset - order by seq desc - limit 1 - ) m on true - ) +as +$$ +with all_assets as (select v.v as asset + from get_all_assets(_ledger) v), + moves as (select m.* + from all_assets assets + join lateral ( + select * + from moves s + where (_before is null or s.insertion_date <= _before) + and s.account_address = _account + and s.asset = assets.asset + and s.ledger = _ledger + order by seq desc + limit 1 + ) m on true) select moves.asset, moves.post_commit_volumes from moves $$; @@ -622,42 +666,52 @@ create function volumes_to_jsonb(v volumes_with_asset) returns jsonb language sql immutable -as $$ +as +$$ select ('{"' || v.asset || '": {"input": ' || (v.volumes).inputs || ', "output": ' || 
(v.volumes).outputs || '}}')::jsonb $$; -create function get_account_aggregated_effective_volumes(_account_address varchar, _before timestamp default null) +create function get_account_aggregated_effective_volumes(_ledger varchar, _account_address varchar, + _before timestamp default null) returns jsonb language sql stable -as $$ +as +$$ select aggregate_objects(volumes_to_jsonb(volumes_with_asset)) -from get_all_account_effective_volumes(_account_address, _before := _before) volumes_with_asset +from get_all_account_effective_volumes(_ledger, _account_address, _before := _before) volumes_with_asset $$; -create function get_account_aggregated_volumes(_account_address varchar, _before timestamp default null) +create function get_account_aggregated_volumes(_ledger varchar, _account_address varchar, + _before timestamp default null) returns jsonb language sql stable parallel safe -as $$ +as +$$ select aggregate_objects(volumes_to_jsonb(volumes_with_asset)) -from get_all_account_volumes(_account_address, _before := _before) volumes_with_asset +from get_all_account_volumes(_ledger, _account_address, _before := _before) volumes_with_asset $$; -create function get_account_balance(_account varchar, _asset varchar, _before timestamp default null) +create function get_account_balance(_ledger varchar, _account varchar, _asset varchar, _before timestamp default null) returns numeric language sql stable -as $$ +as +$$ select (post_commit_volumes).inputs - (post_commit_volumes).outputs from moves s -where (_before is null or s.effective_date <= _before) and s.account_address = _account and s.asset = _asset +where (_before is null or s.effective_date <= _before) + and s.account_address = _account + and s.asset = _asset + and s.ledger = _ledger order by seq desc limit 1 $$; create function aggregate_ledger_volumes( + _ledger varchar, _before timestamp default null, _accounts varchar[] default null, _assets varchar[] default null @@ -665,47 +719,75 @@ create function 
aggregate_ledger_volumes( returns setof volumes_with_asset language sql stable -as $$ -with - moves as ( - select distinct on (m.account_address, m.asset) m.* - from moves m - where (_before is null or m.effective_date <= _before) and - (_accounts is null or account_address = any(_accounts)) and - (_assets is null or asset = any(_assets)) - order by account_address, asset, m.seq desc - ) -select v.asset, (sum((v.post_commit_effective_volumes).inputs), sum((v.post_commit_effective_volumes).outputs)) +as +$$ +with moves as (select distinct on (m.account_address, m.asset) m.* + from moves m + where (_before is null or m.effective_date <= _before) + and (_accounts is null or account_address = any (_accounts)) + and (_assets is null or asset = any (_assets)) + and m.ledger = _ledger + order by account_address, asset, m.seq desc) +select v.asset, + (sum((v.post_commit_effective_volumes).inputs), sum((v.post_commit_effective_volumes).outputs)) from moves v group by v.asset $$; -create function get_aggregated_effective_volumes_for_transaction(tx numeric) returns jsonb +create function get_aggregated_effective_volumes_for_transaction(_ledger varchar, tx numeric) returns jsonb stable language sql as $$ select aggregate_objects(jsonb_build_object(data.account_address, data.aggregated)) -from ( - select distinct on (move.account_address, move.asset) move.account_address, - volumes_to_jsonb((move.asset, first(move.post_commit_effective_volumes))) as aggregated - from moves move - where move.transaction_id = tx - group by move.account_address, move.asset - ) data +from (select distinct on (move.account_address, move.asset) move.account_address, + volumes_to_jsonb((move.asset, first(move.post_commit_effective_volumes))) as aggregated + from moves move + where move.transactions_seq = tx + and ledger = _ledger + group by move.account_address, move.asset) data $$; -create function get_aggregated_volumes_for_transaction(tx numeric) returns jsonb +create function 
get_aggregated_volumes_for_transaction(_ledger varchar, tx numeric) returns jsonb stable language sql as $$ select aggregate_objects(jsonb_build_object(data.account_address, data.aggregated)) -from ( - select distinct on (move.account_address, move.asset) move.account_address, - volumes_to_jsonb((move.asset, first(move.post_commit_volumes))) as aggregated - from moves move - where move.transaction_id = tx - group by move.account_address, move.asset - ) data +from (select distinct on (move.account_address, move.asset) move.account_address, + volumes_to_jsonb((move.asset, first(move.post_commit_volumes))) as aggregated + from moves move + where move.transactions_seq = tx + and ledger = _ledger + group by move.account_address, move.asset) data $$; + +create trigger "insert_log" + after insert + on "logs" + for each row +execute procedure handle_log(); + +create trigger "update_account" + after update + on "accounts" + for each row +execute procedure update_account_metadata_history(); + +create trigger "insert_account" + after insert + on "accounts" + for each row +execute procedure insert_account_metadata_history(); + +create trigger "update_transaction" + after update + on "transactions" + for each row +execute procedure update_transaction_metadata_history(); + +create trigger "insert_transaction" + after insert + on "transactions" + for each row +execute procedure insert_transaction_metadata_history(); \ No newline at end of file diff --git a/internal/storage/ledgerstore/store.go b/internal/storage/ledgerstore/store.go index 49ae647c0..f3fa324b6 100644 --- a/internal/storage/ledgerstore/store.go +++ b/internal/storage/ledgerstore/store.go @@ -3,18 +3,15 @@ package ledgerstore import ( "context" "database/sql" - "fmt" - "github.com/formancehq/ledger/internal/storage/sqlutils" + "github.com/formancehq/stack/libs/go-libs/migrations" _ "github.com/jackc/pgx/v5/stdlib" - "github.com/pkg/errors" "github.com/uptrace/bun" ) type Store struct { - db *bun.DB - onDelete 
func(ctx context.Context) error + bucket *Bucket name string } @@ -23,55 +20,30 @@ func (store *Store) Name() string { return store.name } -func (store *Store) GetDatabase() *bun.DB { - return store.db +func (store *Store) GetDB() *bun.DB { + return store.bucket.db } -func (store *Store) Delete(ctx context.Context) error { - _, err := store.db.ExecContext(ctx, "delete schema ? cascade", store.name) - if err != nil { - return err - } - return errors.Wrap(store.onDelete(ctx), "deleting ledger store") -} - -func (store *Store) prepareTransaction(ctx context.Context) (bun.Tx, error) { - txOptions := &sql.TxOptions{} - - tx, err := store.db.BeginTx(ctx, txOptions) - if err != nil { - return tx, err - } - if _, err := tx.Exec(fmt.Sprintf(`set search_path = "%s"`, store.Name())); err != nil { - return tx, err - } - return tx, nil +func (store *Store) withTransaction(ctx context.Context, callback func(tx bun.Tx) error) error { + return store.bucket.db.RunInTx(ctx, &sql.TxOptions{}, func(ctx context.Context, tx bun.Tx) error { + return callback(tx) + }) } -func (store *Store) withTransaction(ctx context.Context, callback func(tx bun.Tx) error) error { - tx, err := store.prepareTransaction(ctx) - if err != nil { - return err - } - if err := callback(tx); err != nil { - _ = tx.Rollback() - return sqlutils.PostgresError(err) - } - return tx.Commit() +func (store *Store) IsUpToDate(ctx context.Context) (bool, error) { + return store.bucket.IsUpToDate(ctx) } -func (store *Store) IsSchemaUpToDate(ctx context.Context) (bool, error) { - return store.getMigrator().IsUpToDate(ctx, store.db) +func (store *Store) GetMigrationsInfo(ctx context.Context) ([]migrations.Info, error) { + return store.bucket.GetMigrationsInfo(ctx) } func New( - db *bun.DB, + bucket *Bucket, name string, - onDelete func(ctx context.Context) error, ) (*Store, error) { return &Store{ - db: db, - name: name, - onDelete: onDelete, + bucket: bucket, + name: name, }, nil } diff --git 
a/internal/storage/ledgerstore/store_benchmarks_test.go b/internal/storage/ledgerstore/store_benchmarks_test.go index 0ce316603..1a3857f50 100644 --- a/internal/storage/ledgerstore/store_benchmarks_test.go +++ b/internal/storage/ledgerstore/store_benchmarks_test.go @@ -24,6 +24,7 @@ import ( var nbTransactions = flag.Int("transactions", 10000, "number of transactions to create") var batch = flag.Int("batch", 1000, "logs batching") +var ledgers = flag.Int("ledgers", 100, "number of ledger for multi ledgers benchmarks") type bunContextHook struct{} @@ -134,6 +135,82 @@ var scenarios = []scenario{ require.NoError(b, store.InsertLogs(ctx, lastLog)) } + return &scenarioInfo{ + nbAccounts: nbAccounts, + } + }, + }, + { + name: "multi-ledger", + setup: func(ctx context.Context, b *testing.B, store *Store) *scenarioInfo { + var lastLog *ledger.ChainedLog + + nbAccounts := *batch / 2 + loadData := func(store *Store) { + for i := 0; i < *nbTransactions/(*batch); i++ { + logs := make([]*ledger.ChainedLog, 0) + appendLog := func(log *ledger.Log) { + chainedLog := log.ChainLog(lastLog) + logs = append(logs, chainedLog) + lastLog = chainedLog + } + for j := 0; j < (*batch); j += 2 { + provision := big.NewInt(10000) + itemPrice := provision.Div(provision, big.NewInt(2)) + fees := itemPrice.Div(itemPrice, big.NewInt(100)) // 1% + + appendLog(ledger.NewTransactionLog( + ledger.NewTransaction(). + WithPostings(ledger.NewPosting( + "world", fmt.Sprintf("player:%d", j/2), "USD/2", provision, + )). + WithID(big.NewInt(int64(i*(*batch)+j))). + WithDate(now.Add(time.Minute*time.Duration(i*(*batch)+j))), + map[string]metadata.Metadata{}, + )) + appendLog(ledger.NewTransactionLog( + ledger.NewTransaction(). + WithPostings( + ledger.NewPosting(fmt.Sprintf("player:%d", j/2), "seller", "USD/2", itemPrice), + ledger.NewPosting("seller", "fees", "USD/2", fees), + ). + WithID(big.NewInt(int64(i*(*batch)+j+1))). 
+ WithDate(now.Add(time.Minute*time.Duration(i*(*batch)+j))), + map[string]metadata.Metadata{}, + )) + status := "pending" + if j%8 == 0 { + status = "terminated" + } + appendLog(ledger.NewSetMetadataLog(now.Add(time.Minute*time.Duration(i*(*batch)+j)), ledger.SetMetadataLogPayload{ + TargetType: ledger.MetaTargetTypeTransaction, + TargetID: big.NewInt(int64(i*(*batch) + j + 1)), + Metadata: map[string]string{ + "status": status, + }, + })) + } + require.NoError(b, store.InsertLogs(ctx, logs...)) + } + + for i := 0; i < nbAccounts; i++ { + lastLog = ledger.NewSetMetadataLog(now, ledger.SetMetadataLogPayload{ + TargetType: ledger.MetaTargetTypeAccount, + TargetID: fmt.Sprintf("player:%d", i), + Metadata: map[string]string{ + "level": fmt.Sprint(i % 4), + }, + }).ChainLog(lastLog) + require.NoError(b, store.InsertLogs(ctx, lastLog)) + } + } + + for i := 0; i < *ledgers; i++ { + store := newLedgerStore(b) + loadData(store) + } + loadData(store) + return &scenarioInfo{ nbAccounts: nbAccounts, } @@ -152,13 +229,13 @@ func reportMetrics(ctx context.Context, b *testing.B, store *Store) { IdxTupFetch int `bun:"idx_tup_fetch"` } ret := make([]stat, 0) - err := store.db.NewSelect(). + err := store.GetDB().NewSelect(). Table("pg_stat_user_indexes"). Where("schemaname = ?", store.name). 
Scan(ctx, &ret) require.NoError(b, err) - tabWriter := tabwriter.NewWriter(os.Stdout, 8, 8, 0, '\t', 0) + tabWriter := tabwriter.NewWriter(os.Stderr, 8, 8, 0, '\t', 0) defer func() { require.NoError(b, tabWriter.Flush()) }() @@ -176,7 +253,7 @@ func reportMetrics(ctx context.Context, b *testing.B, store *Store) { func reportTableSizes(ctx context.Context, b *testing.B, store *Store) { - tabWriter := tabwriter.NewWriter(os.Stdout, 12, 8, 0, '\t', 0) + tabWriter := tabwriter.NewWriter(os.Stderr, 12, 8, 0, '\t', 0) defer func() { require.NoError(b, tabWriter.Flush()) }() @@ -190,42 +267,42 @@ func reportTableSizes(ctx context.Context, b *testing.B, store *Store) { "transactions", "accounts", "moves", "logs", "transactions_metadata", "accounts_metadata", } { totalRelationSize := "" - err := store.db.DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_total_relation_size('%s'))`, table)). + err := store.GetDB().DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_total_relation_size('%s'))`, table)). Scan(&totalRelationSize) require.NoError(b, err) tableSize := "" - err = store.db.DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_table_size('%s'))`, table)). + err = store.GetDB().DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_table_size('%s'))`, table)). Scan(&tableSize) require.NoError(b, err) relationSize := "" - err = store.db.DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s'))`, table)). + err = store.GetDB().DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s'))`, table)). Scan(&relationSize) require.NoError(b, err) indexesSize := "" - err = store.db.DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_indexes_size('%s'))`, table)). + err = store.GetDB().DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_indexes_size('%s'))`, table)). 
Scan(&indexesSize) require.NoError(b, err) mainSize := "" - err = store.db.DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s', 'main'))`, table)). + err = store.GetDB().DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s', 'main'))`, table)). Scan(&mainSize) require.NoError(b, err) fsmSize := "" - err = store.db.DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s', 'fsm'))`, table)). + err = store.GetDB().DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s', 'fsm'))`, table)). Scan(&fsmSize) require.NoError(b, err) vmSize := "" - err = store.db.DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s', 'vm'))`, table)). + err = store.GetDB().DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s', 'vm'))`, table)). Scan(&vmSize) require.NoError(b, err) initSize := "" - err = store.db.DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s', 'init'))`, table)). + err = store.GetDB().DB.QueryRowContext(ctx, fmt.Sprintf(`select pg_size_pretty(pg_relation_size('%s', 'init'))`, table)). 
Scan(&initSize) require.NoError(b, err) @@ -251,7 +328,7 @@ func BenchmarkList(b *testing.B) { } }() - _, err := store.db.Exec("VACUUM FULL ANALYZE") + _, err := store.GetDB().Exec("VACUUM FULL ANALYZE") require.NoError(b, err) runAllWithPIT := func(b *testing.B, pit *ledger.Time) { diff --git a/internal/storage/ledgerstore/store_test.go b/internal/storage/ledgerstore/store_test.go index 2f0e956e9..29fb5ed3d 100644 --- a/internal/storage/ledgerstore/store_test.go +++ b/internal/storage/ledgerstore/store_test.go @@ -2,27 +2,12 @@ package ledgerstore import ( "context" - "testing" ledger "github.com/formancehq/ledger/internal" "github.com/formancehq/stack/libs/go-libs/collectionutils" "github.com/formancehq/stack/libs/go-libs/metadata" - "github.com/stretchr/testify/require" ) -func TestInitializeStore(t *testing.T) { - t.Parallel() - store := newLedgerStore(t) - - modified, err := store.Migrate(context.Background()) - require.NoError(t, err) - require.False(t, modified) - - migrationInfos, err := store.GetMigrationsInfo(context.Background()) - require.NoError(t, err) - require.Len(t, migrationInfos, 1) -} - // TODO: remove that func insertTransactions(ctx context.Context, s *Store, txs ...ledger.Transaction) error { var previous *ledger.ChainedLog diff --git a/internal/storage/ledgerstore/transactions.go b/internal/storage/ledgerstore/transactions.go index 7bb05de33..43951a803 100644 --- a/internal/storage/ledgerstore/transactions.go +++ b/internal/storage/ledgerstore/transactions.go @@ -145,7 +145,7 @@ func (store *Store) buildTransactionQuery(p PITFilterWithVolumes, query *bun.Sel selectMetadata := query.NewSelect(). Table("transactions_metadata"). - Where("transactions.id = transactions_metadata.transaction_id"). + Where("transactions.seq = transactions_metadata.transactions_seq"). Order("revision desc"). 
Limit(1) @@ -153,7 +153,8 @@ func (store *Store) buildTransactionQuery(p PITFilterWithVolumes, query *bun.Sel selectMetadata = selectMetadata.Where("date <= ?", p.PIT) } - query = query.Table("transactions") + query = query.Table("transactions"). + Where("transactions.ledger = ?", store.name) if p.PIT != nil && !p.PIT.IsZero() { query = query. @@ -167,10 +168,10 @@ func (store *Store) buildTransactionQuery(p PITFilterWithVolumes, query *bun.Sel } if p.ExpandEffectiveVolumes { - query = query.ColumnExpr("get_aggregated_effective_volumes_for_transaction(transactions.id) as post_commit_effective_volumes") + query = query.ColumnExpr("get_aggregated_effective_volumes_for_transaction(?, transactions.seq) as post_commit_effective_volumes", store.name) } if p.ExpandVolumes { - query = query.ColumnExpr("get_aggregated_volumes_for_transaction(transactions.id) as post_commit_volumes") + query = query.ColumnExpr("get_aggregated_volumes_for_transaction(?, transactions.seq) as post_commit_volumes", store.name) } return query } @@ -314,8 +315,9 @@ func (store *Store) GetTransaction(ctx context.Context, txId *big.Int) (*ledger. return query. Table("transactions"). ColumnExpr(`transactions.id, transactions.reference, transactions.postings, transactions.timestamp, transactions.reverted_at, tm.metadata`). - Join("left join transactions_metadata tm on tm.transaction_id = transactions.id"). + Join("left join transactions_metadata tm on tm.transactions_seq = transactions.seq"). Where("transactions.id = ?", (*paginate.BigInt)(txId)). + Where("transactions.ledger = ?", store.name). Order("tm.revision desc"). Limit(1) }) @@ -332,8 +334,9 @@ func (store *Store) GetTransactionByReference(ctx context.Context, ref string) ( return query. Table("transactions"). ColumnExpr(`transactions.id, transactions.reference, transactions.postings, transactions.timestamp, transactions.reverted_at, tm.metadata`). - Join("left join transactions_metadata tm on tm.transaction_id = transactions.id"). 
+ Join("left join transactions_metadata tm on tm.transactions_seq = transactions.seq"). Where("transactions.reference = ?", ref). + Where("transactions.ledger = ?", store.name). Order("tm.revision desc"). Limit(1) }) @@ -350,8 +353,9 @@ func (store *Store) GetLastTransaction(ctx context.Context) (*ledger.ExpandedTra return query. Table("transactions"). ColumnExpr(`transactions.id, transactions.reference, transactions.postings, transactions.timestamp, transactions.reverted_at, tm.metadata`). - Join("left join transactions_metadata tm on tm.transaction_id = transactions.id"). - Order("transactions.id desc", "tm.revision desc"). + Join("left join transactions_metadata tm on tm.transactions_seq = transactions.seq"). + Order("transactions.seq desc", "tm.revision desc"). + Where("transactions.ledger = ?", store.name). Limit(1) }) if err != nil { diff --git a/internal/storage/ledgerstore/transactions_test.go b/internal/storage/ledgerstore/transactions_test.go index a32725f1f..fce0c543e 100644 --- a/internal/storage/ledgerstore/transactions_test.go +++ b/internal/storage/ledgerstore/transactions_test.go @@ -337,6 +337,7 @@ func TestInsertTransactions(t *testing.T) { t.Parallel() store := newLedgerStore(t) now := ledger.Now() + ctx := logging.TestingContext() t.Run("success inserting transaction", func(t *testing.T) { tx1 := ledger.ExpandedTransaction{ @@ -376,7 +377,7 @@ func TestInsertTransactions(t *testing.T) { err := insertTransactions(context.Background(), store, tx1.Transaction) require.NoError(t, err, "inserting transaction should not fail") - tx, err := store.GetTransactionWithVolumes(context.Background(), NewGetTransactionQuery(big.NewInt(0)). + tx, err := store.GetTransactionWithVolumes(ctx, NewGetTransactionQuery(big.NewInt(0)). 
WithExpandVolumes()) require.NoError(t, err) internaltesting.RequireEqual(t, tx1, *tx) @@ -1039,3 +1040,37 @@ func TestGetTransactions(t *testing.T) { }) } } + +func TestGetLastTransaction(t *testing.T) { + t.Parallel() + store := newLedgerStore(t) + ctx := logging.TestingContext() + + tx1 := ledger.NewTransaction(). + WithIDUint64(0). + WithPostings( + ledger.NewPosting("world", "alice", "USD", big.NewInt(100)), + ) + tx2 := ledger.NewTransaction(). + WithIDUint64(1). + WithPostings( + ledger.NewPosting("world", "bob", "USD", big.NewInt(100)), + ) + tx3 := ledger.NewTransaction(). + WithIDUint64(2). + WithPostings( + ledger.NewPosting("world", "users:marley", "USD", big.NewInt(100)), + ) + + logs := []*ledger.Log{ + ledger.NewTransactionLog(tx1, map[string]metadata.Metadata{}), + ledger.NewTransactionLog(tx2, map[string]metadata.Metadata{}), + ledger.NewTransactionLog(tx3, map[string]metadata.Metadata{}), + } + + require.NoError(t, store.InsertLogs(ctx, ledger.ChainLogs(logs...)...)) + + tx, err := store.GetLastTransaction(ctx) + require.NoError(t, err) + require.Equal(t, *tx3, tx.Transaction) +} diff --git a/internal/storage/ledgerstore/utils.go b/internal/storage/ledgerstore/utils.go index 794cc7250..35e1fd78e 100644 --- a/internal/storage/ledgerstore/utils.go +++ b/internal/storage/ledgerstore/utils.go @@ -21,7 +21,7 @@ func fetch[T any](s *Store, ctx context.Context, builders ...func(query *bun.Sel var ret T ret = reflect.New(reflect.TypeOf(ret).Elem()).Interface().(T) - query := s.db.NewSelect() + query := s.bucket.db.NewSelect() for _, builder := range builders { query = query.Apply(builder) } @@ -37,7 +37,7 @@ func paginateWithOffset[FILTERS any, RETURN any](s *Store, ctx context.Context, q *paginate.OffsetPaginatedQuery[FILTERS], builders ...func(query *bun.SelectQuery) *bun.SelectQuery) (*api.Cursor[RETURN], error) { //var ret RETURN - query := s.db.NewSelect() + query := s.bucket.db.NewSelect() for _, builder := range builders { query = 
query.Apply(builder) } @@ -49,7 +49,7 @@ func paginateWithOffset[FILTERS any, RETURN any](s *Store, ctx context.Context, } func paginateWithColumn[FILTERS any, RETURN any](s *Store, ctx context.Context, q *paginate.ColumnPaginatedQuery[FILTERS], builders ...func(query *bun.SelectQuery) *bun.SelectQuery) (*api.Cursor[RETURN], error) { - query := s.db.NewSelect() + query := s.bucket.db.NewSelect() for _, builder := range builders { query = query.Apply(builder) } @@ -58,11 +58,11 @@ func paginateWithColumn[FILTERS any, RETURN any](s *Store, ctx context.Context, } func count(s *Store, ctx context.Context, builders ...func(query *bun.SelectQuery) *bun.SelectQuery) (int, error) { - query := s.db.NewSelect() + query := s.bucket.db.NewSelect() for _, builder := range builders { query = query.Apply(builder) } - return s.db.NewSelect(). + return s.bucket.db.NewSelect(). TableExpr("(" + query.String() + ") data"). Count(ctx) } diff --git a/internal/storage/paginate/iterate.go b/internal/storage/paginate/iterate.go new file mode 100644 index 000000000..838c109a8 --- /dev/null +++ b/internal/storage/paginate/iterate.go @@ -0,0 +1,36 @@ +package paginate + +import ( + "context" + "reflect" + + sharedapi "github.com/formancehq/stack/libs/go-libs/api" + "github.com/pkg/errors" +) + +func Iterate[T any, Q any](ctx context.Context, q Q, iterator func(ctx context.Context, q Q) (*sharedapi.Cursor[T], error), cb func(cursor *sharedapi.Cursor[T]) error) error { + + for { + cursor, err := iterator(ctx, q) + if err != nil { + return err + } + + if err := cb(cursor); err != nil { + return err + } + + if !cursor.HasMore { + break + } + + newQuery := reflect.New(reflect.TypeOf(q)) + if err := UnmarshalCursor(cursor.Next, newQuery.Interface()); err != nil { + return errors.Wrap(err, "paginating next request") + } + + q = newQuery.Elem().Interface().(Q) + } + + return nil +} diff --git a/internal/storage/sqlutils/errors.go b/internal/storage/sqlutils/errors.go index 60c1b9079..07cc5b274 100644 
--- a/internal/storage/sqlutils/errors.go +++ b/internal/storage/sqlutils/errors.go @@ -31,9 +31,10 @@ func PostgresError(err error) error { } var ( - ErrNotFound = errors.New("not found") - ErrStoreAlreadyExists = errors.New("store already exists") - ErrStoreNotFound = errors.New("store not found") + ErrNotFound = errors.New("not found") + ErrBucketAlreadyExists = errors.New("bucket already exists") + ErrStoreAlreadyExists = errors.New("store already exists") + ErrStoreNotFound = errors.New("store not found") ) func IsNotFoundError(err error) bool { diff --git a/internal/storage/sqlutils/utils.go b/internal/storage/sqlutils/utils.go index 13e39975f..8c625a6b3 100644 --- a/internal/storage/sqlutils/utils.go +++ b/internal/storage/sqlutils/utils.go @@ -7,6 +7,8 @@ import ( "net/url" "time" + "github.com/pkg/errors" + "github.com/formancehq/stack/libs/go-libs/bun/bundebug" "github.com/uptrace/bun" "github.com/uptrace/bun/dialect/pgdialect" @@ -30,7 +32,7 @@ func (opts ConnectionOptions) String() string { func OpenSQLDB(options ConnectionOptions, hooks ...bun.QueryHook) (*bun.DB, error) { sqldb, err := sql.Open("postgres", options.DatabaseSourceName) if err != nil { - return nil, err + return nil, errors.Wrap(err, "connecting to server") } if options.MaxIdleConns != 0 { sqldb.SetMaxIdleConns(options.MaxIdleConns) @@ -52,23 +54,27 @@ func OpenSQLDB(options ConnectionOptions, hooks ...bun.QueryHook) (*bun.DB, erro } if err := db.Ping(); err != nil { - return nil, err + return nil, errors.Wrap(err, "pinging server") } return db, nil } func OpenDBWithSchema(connectionOptions ConnectionOptions, schema string, hooks ...bun.QueryHook) (*bun.DB, error) { - parsedConnectionParams, err := url.Parse(connectionOptions.DatabaseSourceName) + connectionOptions.DatabaseSourceName = SchemaConnectionString(connectionOptions.DatabaseSourceName, schema) + + return OpenSQLDB(connectionOptions, hooks...) 
+} + +func SchemaConnectionString(sourceName, schema string) string { + parsedConnectionParams, err := url.Parse(sourceName) if err != nil { - return nil, PostgresError(err) + panic(err) } query := parsedConnectionParams.Query() query.Set("search_path", schema) parsedConnectionParams.RawQuery = query.Encode() - connectionOptions.DatabaseSourceName = parsedConnectionParams.String() - - return OpenSQLDB(connectionOptions, hooks...) + return parsedConnectionParams.String() } diff --git a/internal/storage/storagetesting/storage.go b/internal/storage/storagetesting/storage.go index 0aa29ee6c..0257c236f 100644 --- a/internal/storage/storagetesting/storage.go +++ b/internal/storage/storagetesting/storage.go @@ -13,10 +13,10 @@ import ( ) func StorageDriver(t pgtesting.TestingT) *driver.Driver { - pgServer := pgtesting.NewPostgresDatabase(t) + pgDatabase := pgtesting.NewPostgresDatabase(t) d := driver.New(sqlutils.ConnectionOptions{ - DatabaseSourceName: pgServer.ConnString(), + DatabaseSourceName: pgDatabase.ConnString(), Debug: testing.Verbose(), MaxIdleConns: 40, MaxOpenConns: 40, @@ -24,6 +24,9 @@ func StorageDriver(t pgtesting.TestingT) *driver.Driver { }) require.NoError(t, d.Initialize(context.Background())) + t.Cleanup(func() { + require.NoError(t, d.Close()) + }) return d } diff --git a/internal/storage/systemstore/ledgers.go b/internal/storage/systemstore/ledgers.go index b00aa15ec..9d10f81f5 100644 --- a/internal/storage/systemstore/ledgers.go +++ b/internal/storage/systemstore/ledgers.go @@ -3,91 +3,88 @@ package systemstore import ( "context" - storageerrors "github.com/formancehq/ledger/internal/storage/sqlutils" + "github.com/formancehq/ledger/internal/storage/paginate" + "github.com/formancehq/ledger/internal/storage/sqlutils" + sharedapi "github.com/formancehq/stack/libs/go-libs/api" ledger "github.com/formancehq/ledger/internal" "github.com/pkg/errors" "github.com/uptrace/bun" ) -type Ledgers struct { +type Ledger struct { bun.BaseModel 
`bun:"_system.ledgers,alias:ledgers"` - Ledger string `bun:"ledger,type:varchar(255),pk"` // Primary key - AddedAt ledger.Time `bun:"addedat,type:timestamp"` + Name string `bun:"ledger,type:varchar(255),pk" json:"name"` // Primary key + AddedAt ledger.Time `bun:"addedat,type:timestamp" json:"addedAt"` + Bucket string `bun:"bucket,type:varchar(255)" json:"bucket"` } -func (s *Store) ListLedgers(ctx context.Context) ([]string, error) { - query := s.db.NewSelect(). - Model((*Ledgers)(nil)). - Column("ledger"). - String() +type PaginatedQueryOptions struct { + PageSize uint64 `json:"pageSize"` +} - rows, err := s.db.QueryContext(ctx, query) - if err != nil { - return nil, storageerrors.PostgresError(err) - } - defer rows.Close() - - res := make([]string, 0) - for rows.Next() { - var ledger string - if err := rows.Scan(&ledger); err != nil { - return nil, storageerrors.PostgresError(err) - } - res = append(res, ledger) +type ListLedgersQuery paginate.OffsetPaginatedQuery[PaginatedQueryOptions] + +func (query ListLedgersQuery) WithPageSize(pageSize uint64) ListLedgersQuery { + query.PageSize = pageSize + return query +} + +func NewListLedgersQuery(pageSize uint64) ListLedgersQuery { + return ListLedgersQuery{ + PageSize: pageSize, } - return res, nil +} + +func (s *Store) ListLedgers(ctx context.Context, q ListLedgersQuery) (*sharedapi.Cursor[Ledger], error) { + query := s.db.NewSelect(). + Table("_system.ledgers"). + Column("ledger", "bucket", "addedat"). + Order("addedat asc") + + return paginate.UsingOffset[PaginatedQueryOptions, Ledger](ctx, query, paginate.OffsetPaginatedQuery[PaginatedQueryOptions](q)) } func (s *Store) DeleteLedger(ctx context.Context, name string) error { _, err := s.db.NewDelete(). - Model((*Ledgers)(nil)). + Model((*Ledger)(nil)). Where("ledger = ?", name). 
Exec(ctx) - return errors.Wrap(storageerrors.PostgresError(err), "delete ledger from system store") + return errors.Wrap(sqlutils.PostgresError(err), "delete ledger from system store") } -func (s *Store) Register(ctx context.Context, ledgerName string) (bool, error) { - l := &Ledgers{ - Ledger: ledgerName, - AddedAt: ledger.Now(), +func (s *Store) RegisterLedger(ctx context.Context, l *Ledger) (bool, error) { + return RegisterLedger(ctx, s.db, l) +} + +func (s *Store) GetLedger(ctx context.Context, name string) (*Ledger, error) { + ret := &Ledger{} + if err := s.db.NewSelect(). + Model(ret). + Column("ledger", "bucket", "addedat"). + Where("ledger = ?", name). + Scan(ctx); err != nil { + return nil, sqlutils.PostgresError(err) } - ret, err := s.db.NewInsert(). + return ret, nil +} + +func RegisterLedger(ctx context.Context, db bun.IDB, l *Ledger) (bool, error) { + ret, err := db.NewInsert(). Model(l). Ignore(). Exec(ctx) if err != nil { - return false, storageerrors.PostgresError(err) + return false, sqlutils.PostgresError(err) } affected, err := ret.RowsAffected() if err != nil { - return false, storageerrors.PostgresError(err) + return false, sqlutils.PostgresError(err) } return affected > 0, nil } - -func (s *Store) Exists(ctx context.Context, ledger string) (bool, error) { - query := s.db.NewSelect(). - Model((*Ledgers)(nil)). - Column("ledger"). - Where("ledger = ?", ledger). 
- String() - - ret := s.db.QueryRowContext(ctx, query) - if ret.Err() != nil { - return false, nil - } - - var t string - _ = ret.Scan(&t) // Trigger close - - if t == "" { - return false, nil - } - return true, nil -} diff --git a/internal/storage/systemstore/ledgers_test.go b/internal/storage/systemstore/ledgers_test.go new file mode 100644 index 000000000..e01d0dbc8 --- /dev/null +++ b/internal/storage/systemstore/ledgers_test.go @@ -0,0 +1,69 @@ +package systemstore + +import ( + "fmt" + "testing" + "time" + + ledger "github.com/formancehq/ledger/internal" + "github.com/formancehq/ledger/internal/storage/paginate" + "github.com/formancehq/ledger/internal/storage/sqlutils" + "github.com/formancehq/stack/libs/go-libs/logging" + "github.com/formancehq/stack/libs/go-libs/pgtesting" + "github.com/stretchr/testify/require" +) + +func newSystemStore(t *testing.T) *Store { + t.Parallel() + t.Helper() + ctx := logging.TestingContext() + + pgServer := pgtesting.NewPostgresDatabase(t) + + store, err := Connect(ctx, sqlutils.ConnectionOptions{ + DatabaseSourceName: pgServer.ConnString(), + Debug: testing.Verbose(), + }) + require.NoError(t, err) + t.Cleanup(func() { + require.NoError(t, store.Close()) + }) + + require.NoError(t, Migrate(ctx, store.DB())) + + return store +} + +func TestListLedgers(t *testing.T) { + ctx := logging.TestingContext() + store := newSystemStore(t) + + ledgers := make([]Ledger, 0) + pageSize := uint64(2) + count := uint64(10) + now := ledger.Now() + for i := uint64(0); i < count; i++ { + ledger := Ledger{ + Name: fmt.Sprintf("ledger%d", i), + AddedAt: now.Add(time.Duration(i) * time.Second), + } + ledgers = append(ledgers, ledger) + _, err := store.RegisterLedger(ctx, &ledger) + require.NoError(t, err) + } + + cursor, err := store.ListLedgers(ctx, NewListLedgersQuery(pageSize)) + require.NoError(t, err) + require.Len(t, cursor.Data, int(pageSize)) + require.Equal(t, ledgers[:pageSize], cursor.Data) + + for i := pageSize; i < count; i += pageSize 
{ + query := ListLedgersQuery{} + require.NoError(t, paginate.UnmarshalCursor(cursor.Next, &query)) + + cursor, err = store.ListLedgers(ctx, query) + require.NoError(t, err) + require.Len(t, cursor.Data, 2) + require.Equal(t, ledgers[i:i+pageSize], cursor.Data) + } +} diff --git a/internal/storage/systemstore/main_test.go b/internal/storage/systemstore/main_test.go new file mode 100644 index 000000000..0003c2e05 --- /dev/null +++ b/internal/storage/systemstore/main_test.go @@ -0,0 +1,22 @@ +package systemstore + +import ( + "os" + "testing" + + "github.com/formancehq/stack/libs/go-libs/logging" + "github.com/formancehq/stack/libs/go-libs/pgtesting" +) + +func TestMain(m *testing.M) { + if err := pgtesting.CreatePostgresServer(); err != nil { + logging.Error(err) + os.Exit(1) + } + + code := m.Run() + if err := pgtesting.DestroyPostgresServer(); err != nil { + logging.Error(err) + } + os.Exit(code) +} diff --git a/internal/storage/systemstore/migrations.go b/internal/storage/systemstore/migrations.go index 95e0cb392..cec175571 100644 --- a/internal/storage/systemstore/migrations.go +++ b/internal/storage/systemstore/migrations.go @@ -9,21 +9,21 @@ import ( "github.com/uptrace/bun" ) -func (s *Store) getMigrator() *migrations.Migrator { - migrator := migrations.NewMigrator(migrations.WithSchema("_system", true)) +func Migrate(ctx context.Context, db bun.IDB) error { + migrator := migrations.NewMigrator(migrations.WithSchema(Schema, true)) migrator.RegisterMigrations( migrations.Migration{ Name: "Init schema", UpWithContext: func(ctx context.Context, tx bun.Tx) error { _, err := tx.NewCreateTable(). - Model((*Ledgers)(nil)). + Model((*Ledger)(nil)). IfNotExists(). Exec(ctx) if err != nil { return sqlutils.PostgresError(err) } - _, err = s.db.NewCreateTable(). + _, err = tx.NewCreateTable(). Model((*configuration)(nil)). IfNotExists(). 
Exec(ctx) @@ -31,5 +31,5 @@ func (s *Store) getMigrator() *migrations.Migrator { }, }, ) - return migrator + return migrator.Up(ctx, db) } diff --git a/internal/storage/systemstore/store.go b/internal/storage/systemstore/store.go index ad50fede3..96477697c 100644 --- a/internal/storage/systemstore/store.go +++ b/internal/storage/systemstore/store.go @@ -2,22 +2,36 @@ package systemstore import ( "context" + "fmt" "github.com/formancehq/ledger/internal/storage/sqlutils" "github.com/uptrace/bun" ) +const Schema = "_system" + type Store struct { db *bun.DB } -func NewStore(db *bun.DB) *Store { - return &Store{db: db} +func Connect(ctx context.Context, connectionOptions sqlutils.ConnectionOptions) (*Store, error) { + + db, err := sqlutils.OpenDBWithSchema(connectionOptions, Schema) + if err != nil { + return nil, sqlutils.PostgresError(err) + } + + _, err = db.ExecContext(ctx, fmt.Sprintf(`create schema if not exists "%s"`, Schema)) + if err != nil { + return nil, sqlutils.PostgresError(err) + } + + return &Store{db: db}, nil } -func (s *Store) Initialize(ctx context.Context) error { - return sqlutils.PostgresError(s.getMigrator().Up(ctx, s.db)) +func (s *Store) DB() *bun.DB { + return s.db } func (s *Store) Close() error { diff --git a/libs/go.mod b/libs/go.mod index 5db093a04..4e11b4cac 100644 --- a/libs/go.mod +++ b/libs/go.mod @@ -14,8 +14,8 @@ require ( github.com/imdario/mergo v0.3.13 github.com/jackc/pgx/v5 v5.3.0 github.com/lib/pq v1.10.7 - github.com/nats-io/nats-server/v2 v2.9.8 - github.com/nats-io/nats.go v1.23.0 + github.com/nats-io/nats-server/v2 v2.9.23 + github.com/nats-io/nats.go v1.28.0 github.com/ory/dockertest/v3 v3.9.1 github.com/pbnjay/memory v0.0.0-20210728143218-7b4eea64cf58 github.com/pkg/errors v0.9.1 @@ -97,7 +97,7 @@ require ( github.com/jcmturner/gokrb5/v8 v8.4.3 // indirect github.com/jcmturner/rpc/v2 v2.0.3 // indirect github.com/jinzhu/inflection v1.0.0 // indirect - github.com/klauspost/compress v1.15.15 // indirect + 
github.com/klauspost/compress v1.16.7 // indirect github.com/lithammer/shortuuid/v3 v3.0.7 // indirect github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect github.com/magiconair/properties v1.8.7 // indirect @@ -106,8 +106,8 @@ require ( github.com/minio/highwayhash v1.0.2 // indirect github.com/mitchellh/mapstructure v1.5.0 // indirect github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect - github.com/nats-io/jwt/v2 v2.3.0 // indirect - github.com/nats-io/nkeys v0.3.0 // indirect + github.com/nats-io/jwt/v2 v2.5.0 // indirect + github.com/nats-io/nkeys v0.4.4 // indirect github.com/nats-io/nuid v1.0.1 // indirect github.com/oklog/ulid v1.3.1 // indirect github.com/opencontainers/go-digest v1.0.0 // indirect @@ -145,12 +145,12 @@ require ( go.uber.org/atomic v1.10.0 // indirect go.uber.org/dig v1.16.1 // indirect go.uber.org/multierr v1.9.0 // indirect - golang.org/x/crypto v0.6.0 // indirect + golang.org/x/crypto v0.12.0 // indirect golang.org/x/mod v0.8.0 // indirect golang.org/x/net v0.10.0 // indirect - golang.org/x/sys v0.8.0 // indirect - golang.org/x/text v0.9.0 // indirect - golang.org/x/time v0.2.0 // indirect + golang.org/x/sys v0.11.0 // indirect + golang.org/x/text v0.12.0 // indirect + golang.org/x/time v0.3.0 // indirect golang.org/x/tools v0.6.0 // indirect google.golang.org/genproto v0.0.0-20230410155749-daa745c078e1 // indirect google.golang.org/grpc v1.56.3 // indirect diff --git a/libs/go.sum b/libs/go.sum index 4d9a5fde8..0f2a5e0d5 100644 --- a/libs/go.sum +++ b/libs/go.sum @@ -295,8 +295,8 @@ github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7V github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvWXihfKN4Q= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= -github.com/klauspost/compress v1.15.15 
h1:EF27CXIuDsYJ6mmvtBRlEuB2UVOqHG1tAXgZ7yIO+lw= -github.com/klauspost/compress v1.15.15/go.mod h1:ZcK2JAFqKOpnBlxcLsJzYfrS9X1akm9fHZNnD9+Vo/4= +github.com/klauspost/compress v1.16.7 h1:2mk3MPGNzKyxErAw8YaohYh69+pa4sIQSC0fPGCFR9I= +github.com/klauspost/compress v1.16.7/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/kr/fs v0.1.0/go.mod h1:FFnZGqtBN9Gxj7eW1uZ42v5BccTP0vu6NEaFoC2HwRg= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= @@ -332,14 +332,14 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJ github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= github.com/mrunalp/fileutils v0.5.0/go.mod h1:M1WthSahJixYnrXQl/DFQuteStB1weuxD2QJNHXfbSQ= github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= -github.com/nats-io/jwt/v2 v2.3.0 h1:z2mA1a7tIf5ShggOFlR1oBPgd6hGqcDYsISxZByUzdI= -github.com/nats-io/jwt/v2 v2.3.0/go.mod h1:0tqz9Hlu6bCBFLWAASKhE5vUA4c24L9KPUUgvwumE/k= -github.com/nats-io/nats-server/v2 v2.9.8 h1:jgxZsv+A3Reb3MgwxaINcNq/za8xZInKhDg9Q0cGN1o= -github.com/nats-io/nats-server/v2 v2.9.8/go.mod h1:AB6hAnGZDlYfqb7CTAm66ZKMZy9DpfierY1/PbpvI2g= -github.com/nats-io/nats.go v1.23.0 h1:lR28r7IX44WjYgdiKz9GmUeW0uh/m33uD3yEjLZ2cOE= -github.com/nats-io/nats.go v1.23.0/go.mod h1:ki/Scsa23edbh8IRZbCuNXR9TDcbvfaSijKtaqQgw+Q= -github.com/nats-io/nkeys v0.3.0 h1:cgM5tL53EvYRU+2YLXIK0G2mJtK12Ft9oeooSZMA2G8= -github.com/nats-io/nkeys v0.3.0/go.mod h1:gvUNGjVcM2IPr5rCsRsC6Wb3Hr2CQAm08dsxtV6A5y4= +github.com/nats-io/jwt/v2 v2.5.0 h1:WQQ40AAlqqfx+f6ku+i0pOVm+ASirD4fUh+oQsiE9Ak= +github.com/nats-io/jwt/v2 v2.5.0/go.mod h1:24BeQtRwxRV8ruvC4CojXlx/WQ/VjuwlYiH+vu/+ibI= +github.com/nats-io/nats-server/v2 v2.9.23 h1:6Wj6H6QpP9FMlpCyWUaNu2yeZ/qGj+mdRkZ1wbikExU= 
+github.com/nats-io/nats-server/v2 v2.9.23/go.mod h1:wEjrEy9vnqIGE4Pqz4/c75v9Pmaq7My2IgFmnykc4C0= +github.com/nats-io/nats.go v1.28.0 h1:Th4G6zdsz2d0OqXdfzKLClo6bOfoI/b1kInhRtFIy5c= +github.com/nats-io/nats.go v1.28.0/go.mod h1:XpbWUlOElGwTYbMR7imivs7jJj9GtK7ypv321Wp6pjc= +github.com/nats-io/nkeys v0.4.4 h1:xvBJ8d69TznjcQl9t6//Q5xXuVhyYiSos6RPtvQNTwA= +github.com/nats-io/nkeys v0.4.4/go.mod h1:XUkxdLPTufzlihbamfzQ7mw/VGx6ObUs+0bN5sNvt64= github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw= github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c= github.com/oklog/ulid v1.3.1 h1:EGfNDEx6MqHz8B3uNV6QAib1UR2Lm97sHi3ocA6ESJ4= @@ -545,13 +545,12 @@ golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8U golang.org/x/crypto v0.0.0-20190605123033-f99c8df09eb5/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/crypto v0.0.0-20210314154223-e6e6c4f2bb5b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20211108221036-ceb1ce70b4fa/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.6.0 h1:qfktjS5LUO+fFKeJXZ+ikTRijMmljikvG68fpMMruSc= -golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58= +golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk= +golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw= 
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -707,8 +706,8 @@ golang.org/x/sys v0.0.0-20221010170243-090e33056c14/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.8.0 h1:EBmGv8NaZBZTWvrbjNoL6HVt+IVy3QDQpJs7VRIw3tU= -golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM= +golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -721,13 +720,13 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= -golang.org/x/text v0.9.0 h1:2sjJmO8cDvYveuX97RDLsxlyUxLl+GHoLxBiRdHllBE= -golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8= +golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc= +golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE= golang.org/x/time 
v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= -golang.org/x/time v0.2.0 h1:52I/1L54xyEQAYdtcSuxtiT84KGYTBGXwayxmIpNJhE= -golang.org/x/time v0.2.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= +golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/libs/logging/logrus.go b/libs/logging/logrus.go index 28a49ed94..af7922bd8 100644 --- a/libs/logging/logrus.go +++ b/libs/logging/logrus.go @@ -79,7 +79,7 @@ func Testing() *logrusLogger { once.Do(flag.Parse) if testing.Verbose() { - logger.SetOutput(os.Stdout) + logger.SetOutput(os.Stderr) logger.SetLevel(logrus.DebugLevel) } diff --git a/libs/migrations/migrator.go b/libs/migrations/migrator.go index 77fb68d1b..b9b880e8d 100644 --- a/libs/migrations/migrator.go +++ b/libs/migrations/migrator.go @@ -16,6 +16,10 @@ const ( migrationTable = "goose_db_version" ) +var ( + ErrMissingVersionTable = errors.New("missing version table") +) + type Info struct { bun.BaseModel `bun:"goose_db_version"` @@ -83,7 +87,7 @@ func (m *Migrator) getLastVersion(ctx context.Context, querier interface { case *pq.Error: switch err.Code { case "42P01": // Table not exists - return -1, nil + return -1, ErrMissingVersionTable } } } @@ -109,136 +113,126 @@ func (m *Migrator) insertVersion(ctx context.Context, tx bun.Tx, version int) er return err } -func 
(m *Migrator) GetDBVersion(ctx context.Context, db *bun.DB) (int64, error) { - tx, err := m.newTx(ctx, db) - if err != nil { - return -1, err +func (m *Migrator) GetDBVersion(ctx context.Context, db bun.IDB) (int64, error) { + ret := int64(0) + if err := m.runInTX(ctx, db, func(ctx context.Context, tx bun.Tx) error { + var err error + ret, err = m.getLastVersion(ctx, tx) + return err + }); err != nil { + return 0, err } - defer func() { - _ = tx.Rollback() - }() - return m.getLastVersion(ctx, tx) + return ret, nil } -func (m *Migrator) newTx(ctx context.Context, db bun.IDB) (bun.Tx, error) { - tx, err := db.BeginTx(ctx, &sql.TxOptions{}) - if err != nil { - return bun.Tx{}, err - } - - if m.schema != "" { - _, err := tx.ExecContext(ctx, fmt.Sprintf(`set search_path = "%s"`, m.schema)) - if err != nil { - return bun.Tx{}, err +func (m *Migrator) runInTX(ctx context.Context, db bun.IDB, fn func(ctx context.Context, tx bun.Tx) error) error { + return db.RunInTx(ctx, &sql.TxOptions{}, func(ctx context.Context, tx bun.Tx) error { + if m.schema != "" { + _, err := tx.ExecContext(ctx, fmt.Sprintf(`set search_path = "%s"`, m.schema)) + if err != nil { + return err + } } - } - - return tx, err + return fn(ctx, tx) + }) } func (m *Migrator) Up(ctx context.Context, db bun.IDB) error { - tx, err := m.newTx(ctx, db) - if err != nil { - return err - } - defer func() { - _ = tx.Rollback() - }() + return m.runInTX(ctx, db, func(ctx context.Context, tx bun.Tx) error { + if m.schema != "" && m.createSchema { + _, err := tx.ExecContext(ctx, fmt.Sprintf(`create schema if not exists "%s"`, m.schema)) + if err != nil { + return err + } + } - if m.schema != "" && m.createSchema { - _, err := tx.ExecContext(ctx, fmt.Sprintf(`create schema if not exists "%s"`, m.schema)) - if err != nil { + if err := m.createVersionTable(ctx, tx); err != nil { return err } - } - - if err := m.createVersionTable(ctx, tx); err != nil { - return err - } - lastMigration, err := m.getLastVersion(ctx, tx) - if 
err != nil { - return err - } + lastMigration, err := m.getLastVersion(ctx, tx) + if err != nil { + return err + } - if len(m.migrations) > int(lastMigration)-1 { - for ind, migration := range m.migrations[lastMigration:] { - if migration.UpWithContext != nil { - if err := migration.UpWithContext(ctx, tx); err != nil { - return err + if len(m.migrations) > int(lastMigration)-1 { + for ind, migration := range m.migrations[lastMigration:] { + if migration.UpWithContext != nil { + if err := migration.UpWithContext(ctx, tx); err != nil { + return err + } + } else if migration.Up != nil { + if err := migration.Up(tx); err != nil { + return err + } + } else { + return errors.New("no code defined for migration") } - } else if migration.Up != nil { - if err := migration.Up(tx); err != nil { + + if err := m.insertVersion(ctx, tx, int(lastMigration)+ind+1); err != nil { return err } - } else { - return errors.New("no code defined for migration") - } - - if err := m.insertVersion(ctx, tx, int(lastMigration)+ind+1); err != nil { - return err } } - } - - return tx.Commit() + return nil + }) } func (m *Migrator) GetMigrations(ctx context.Context, db bun.IDB) ([]Info, error) { - tx, err := m.newTx(ctx, db) - if err != nil { - return nil, err - } - defer func() { - _ = tx.Rollback() - }() - - migrationTableName := migrationTable - if m.schema != "" { - migrationTableName = fmt.Sprintf(`"%s".%s`, m.schema, migrationTableName) - } ret := make([]Info, 0) - if err := tx.NewSelect(). - TableExpr(migrationTableName). - Order("version_id"). - Where("version_id >= 1"). - Column("version_id", "tstamp"). 
- Scan(ctx, &ret); err != nil { - return nil, err - } + if err := m.runInTX(ctx, db, func(ctx context.Context, tx bun.Tx) error { + migrationTableName := migrationTable + if m.schema != "" { + migrationTableName = fmt.Sprintf(`"%s".%s`, m.schema, migrationTableName) + } - for i := 0; i < len(ret); i++ { - ret[i].Name = m.migrations[i].Name - ret[i].State = "DONE" - } + if err := tx.NewSelect(). + TableExpr(migrationTableName). + Order("version_id"). + Where("version_id >= 1"). + Column("version_id", "tstamp"). + Scan(ctx, &ret); err != nil { + return err + } + + for i := 0; i < len(ret); i++ { + ret[i].Name = m.migrations[i].Name + ret[i].State = "DONE" + } + + for i := len(ret); i < len(m.migrations); i++ { + ret = append(ret, Info{ + Version: fmt.Sprint(i), + Name: m.migrations[i].Name, + State: "TO DO", + }) + } - for i := len(ret); i < len(m.migrations); i++ { - ret = append(ret, Info{ - Version: fmt.Sprint(i), - Name: m.migrations[i].Name, - State: "TO DO", - }) + return nil + }); err != nil { + return nil, err } return ret, nil } func (m *Migrator) IsUpToDate(ctx context.Context, db *bun.DB) (bool, error) { - tx, err := m.newTx(ctx, db) - if err != nil { - return false, err - } - defer func() { - _ = tx.Rollback() - }() - version, err := m.getLastVersion(ctx, tx) - if err != nil { + ret := false + if err := m.runInTX(ctx, db, func(ctx context.Context, tx bun.Tx) error { + version, err := m.getLastVersion(ctx, tx) + if err != nil { + return err + } + + ret = int(version) == len(m.migrations) + return nil + }); err != nil { return false, err } - return int(version) == len(m.migrations), nil + return ret, nil } func NewMigrator(opts ...option) *Migrator { diff --git a/libs/pgtesting/postgres.go b/libs/pgtesting/postgres.go index 452605a72..6d68fda6e 100644 --- a/libs/pgtesting/postgres.go +++ b/libs/pgtesting/postgres.go @@ -15,8 +15,6 @@ import ( "github.com/ory/dockertest/v3/docker" "github.com/pkg/errors" "github.com/stretchr/testify/require" - 
"github.com/uptrace/bun" - "github.com/uptrace/bun/dialect/pgdialect" ) type TestingT interface { @@ -35,7 +33,7 @@ func (s *pgDatabase) ConnString() string { type pgServer struct { destroy func() error lock sync.Mutex - db *bun.DB + db *sql.DB port string config config } @@ -221,7 +219,12 @@ func CreatePostgresServer(opts ...option) error { fmt.Sprintf("POSTGRES_DB=%s", cfg.initialDatabaseName), }, Entrypoint: nil, - Cmd: []string{"-c", "superuser-reserved-connections=0"}, + Cmd: []string{ + "-c", "superuser-reserved-connections=0", + "-c", "enable_partition_pruning=on", + "-c", "enable_partitionwise_join=on", + "-c", "enable_partitionwise_aggregate=on", + }, }, cfg.hostConfigOptions...) if err != nil { return errors.Wrap(err, "unable to start postgres server container") @@ -249,11 +252,11 @@ func CreatePostgresServer(opts ...option) error { } try := time.Duration(0) - sqldb, err := sql.Open("postgres", srv.GetDatabaseDSN(cfg.initialDatabaseName)) + srv.db, err = sql.Open("postgres", srv.GetDatabaseDSN(cfg.initialDatabaseName)) if err != nil { return err } - srv.db = bun.NewDB(sqldb, pgdialect.New()) + for try*cfg.statusCheckInterval < cfg.maximumWaitingTime { err := srv.db.Ping() if err != nil { diff --git a/openapi.yaml b/openapi.yaml index 3d3794c6a..a9dcb1bc1 100644 --- a/openapi.yaml +++ b/openapi.yaml @@ -25,6 +25,96 @@ paths: schema: $ref: '#/components/schemas/ErrorResponse' + /v2: + get: + summary: List ledgers + operationId: listLedgers + parameters: + - name: pageSize + in: query + description: | + The maximum number of results to return per page. + example: 100 + schema: + type: integer + format: int64 + minimum: 1 + maximum: 1000 + - name: cursor + in: query + description: | + Parameter used in pagination requests. Maximum page size is set to 15. + Set to the value of next for the next page of results. + Set to the value of previous for the previous page of results. + No other parameters can be set when this parameter is set. 
+ schema: + type: string + example: aHR0cHM6Ly9nLnBhZ2UvTmVrby1SYW1lbj9zaGFyZQ== + tags: + - Ledger + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/LedgerListResponse' + default: + description: Error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + /v2/{ledger}: + parameters: + - name: ledger + in: path + description: Name of the ledger. + required: true + schema: + type: string + example: ledger001 + + get: + summary: Get a ledger + operationId: getLedger + tags: + - Ledger + responses: + "200": + description: OK + content: + application/json: + schema: + $ref: '#/components/schemas/Ledger' + default: + description: Error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + + post: + summary: Create a ledger + operationId: createLedger + tags: + - Ledger + requestBody: + content: + application/json: + schema: + $ref: '#/components/schemas/CreateLedgerRequest' + responses: + "204": + description: OK + default: + description: Error + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorResponse' + /v2/{ledger}/_info: get: summary: Get information about a ledger @@ -949,27 +1039,6 @@ components: data: $ref: '#/components/schemas/AssetsBalances' - Config: - type: object - properties: - storage: - $ref: '#/components/schemas/LedgerStorage' - required: - - storage - - LedgerStorage: - type: object - properties: - driver: - type: string - ledgers: - type: array - items: - type: string - required: - - driver - - ledgers - Metadata: type: object additionalProperties: @@ -979,8 +1048,6 @@ components: ConfigInfo: type: object properties: - config: - $ref: '#/components/schemas/Config' server: type: string version: @@ -1526,4 +1593,57 @@ components: type: string required: - errorCode - - errorDescription \ No newline at end of file + - errorDescription + + CreateLedgerRequest: + type: object + properties: + bucket: + 
type: string + + Ledger: + type: object + properties: + name: + type: string + addedAt: + type: string + format: date-time + bucket: + type: string + required: + - name + - addedAt + - bucket + + LedgerListResponse: + type: object + required: + - cursor + properties: + cursor: + type: object + required: + - pageSize + - hasMore + - data + properties: + pageSize: + type: integer + format: int64 + minimum: 1 + maximum: 1000 + example: 15 + hasMore: + type: boolean + example: false + previous: + type: string + example: "YXVsdCBhbmQgYSBtYXhpbXVtIG1heF9yZXN1bHRzLol=" + next: + type: string + example: "" + data: + type: array + items: + $ref: '#/components/schemas/Ledger' \ No newline at end of file