From ff2859973167687d49f7ff6c59e9796978c6685e Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Wed, 3 Jul 2024 10:48:03 -0700 Subject: [PATCH 01/24] #4911: added new config interface for precomputed ledger backend --- services/horizon/internal/ingest/main.go | 31 ++++++++++++++++++++++++ 1 file changed, 31 insertions(+) diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index 650a08b426..0aa2f10f27 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -20,6 +20,7 @@ import ( "github.com/stellar/go/services/horizon/internal/db2/history" "github.com/stellar/go/services/horizon/internal/ingest/filters" apkg "github.com/stellar/go/support/app" + "github.com/stellar/go/support/datastore" "github.com/stellar/go/support/db" "github.com/stellar/go/support/errors" logpkg "github.com/stellar/go/support/log" @@ -82,7 +83,35 @@ const ( var log = logpkg.DefaultLogger.WithField("service", "ingest") +type LedgerMetaBackendType int64 + +const ( + LedgerBackendCaptiveCore LedgerMetaBackendType = iota + LedgerBackendPrecomputed +) + +func (s LedgerMetaBackendType) String() string { + switch s { + case LedgerBackendCaptiveCore: + return "captive core" + case LedgerBackendPrecomputed: + return "precomputed" +} + +type BufferedBackendConfig struct { + BufferSize uint32 `toml:"size"` + NumWorkers uint32 `toml:"num_workers"` + RetryLimit uint32 `toml:"retry_limit"` + RetryWait time.Duration `toml:"retry_wait"` +} + +type PrecomputedLedgerMetaConfig struct { + DataStoreConfig datastore.DataStoreConfig `toml:"datastore_config"` + BufferedBackendConfig BufferedBackendConfig `toml:"buffered_backend_config"` +} + type Config struct { + LedgerMetaBackendType LedgerMetaBackendType StellarCoreURL string CaptiveCoreBinaryPath string CaptiveCoreStoragePath string @@ -115,6 +144,8 @@ type Config struct { CoreBuildVersionFn ledgerbackend.CoreBuildVersionFunc ReapConfig ReapConfig + + PrecomputedMetaConfig PrecomputedLedgerMetaConfig } const ( From 54dfcc3b55d5fa72be17fa61be21b7f633ddd238 Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Fri, 5 Jul 2024 10:56:36 -0700 Subject: [PATCH 02/24] #4911: fixed syntax error on backend type enum --- services/horizon/internal/ingest/main.go | 3 +++ 1 file changed, 3 insertions(+) diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index 0aa2f10f27..a7efa58f2c 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -96,6 +96,9 @@ func (s LedgerMetaBackendType) String() string { return "captive core" case LedgerBackendPrecomputed: return "precomputed" + default: + return "" + } } type BufferedBackendConfig struct { From 5a020f3d21ad7f743d7760441116d90fd20815ac Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Fri, 5 Jul 2024 12:46:35 -0700 Subject: [PATCH 03/24] #4911: added buffered storage ledger backend to NewSystem factory method --- services/horizon/internal/ingest/main.go | 93 ++++++++++++++++-------- 1 file changed, 64 insertions(+), 29 deletions(-) diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index a7efa58f2c..e5f5cc1e95 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -97,20 +97,20 @@ func (s LedgerMetaBackendType) String() string { case LedgerBackendPrecomputed: return "precomputed" default: - return "" + return "" } } type BufferedBackendConfig struct { - BufferSize uint32 
`toml:"size"` - NumWorkers uint32 `toml:"num_workers"` - RetryLimit uint32 `toml:"retry_limit"` - RetryWait time.Duration `toml:"retry_wait"` + BufferSize uint32 `toml:"size"` + NumWorkers uint32 `toml:"num_workers"` + RetryLimit uint32 `toml:"retry_limit"` + RetryWait time.Duration `toml:"retry_wait"` } type PrecomputedLedgerMetaConfig struct { - DataStoreConfig datastore.DataStoreConfig `toml:"datastore_config"` - BufferedBackendConfig BufferedBackendConfig `toml:"buffered_backend_config"` + DataStoreConfig datastore.DataStoreConfig `toml:"datastore_config"` + BufferedBackendConfig BufferedBackendConfig `toml:"buffered_backend_config"` } type Config struct { @@ -148,7 +148,7 @@ type Config struct { ReapConfig ReapConfig - PrecomputedMetaConfig PrecomputedLedgerMetaConfig + PrecomputedMetaConfig *PrecomputedLedgerMetaConfig } const ( @@ -297,28 +297,63 @@ func NewSystem(config Config) (System, error) { return nil, errors.Wrap(err, "error creating history archive") } - // the only ingest option is local captive core config - logger := log.WithField("subservice", "stellar-core") - ledgerBackend, err := ledgerbackend.NewCaptive( - ledgerbackend.CaptiveCoreConfig{ - BinaryPath: config.CaptiveCoreBinaryPath, - StoragePath: config.CaptiveCoreStoragePath, - UseDB: config.CaptiveCoreConfigUseDB, - Toml: config.CaptiveCoreToml, - NetworkPassphrase: config.NetworkPassphrase, - HistoryArchiveURLs: config.HistoryArchiveURLs, - CheckpointFrequency: config.CheckpointFrequency, - LedgerHashStore: ledgerbackend.NewHorizonDBLedgerHashStore(config.HistorySession), - Log: logger, - Context: ctx, - UserAgent: fmt.Sprintf("captivecore horizon/%s golang/%s", apkg.Version(), runtime.Version()), - CoreProtocolVersionFn: config.CoreProtocolVersionFn, - CoreBuildVersionFn: config.CoreBuildVersionFn, - }, - ) - if err != nil { + var ledgerBackend ledgerbackend.LedgerBackend + + switch config.LedgerMetaBackendType { + case LedgerBackendCaptiveCore: + logger := log.WithField("subservice", "stellar-core") + ledgerBackend, err = ledgerbackend.NewCaptive( + ledgerbackend.CaptiveCoreConfig{ + BinaryPath: config.CaptiveCoreBinaryPath, + StoragePath: config.CaptiveCoreStoragePath, + UseDB: config.CaptiveCoreConfigUseDB, + Toml: config.CaptiveCoreToml, + NetworkPassphrase: config.NetworkPassphrase, + HistoryArchiveURLs: config.HistoryArchiveURLs, + CheckpointFrequency: config.CheckpointFrequency, + LedgerHashStore: ledgerbackend.NewHorizonDBLedgerHashStore(config.HistorySession), + Log: logger, + Context: ctx, + UserAgent: fmt.Sprintf("captivecore horizon/%s golang/%s", apkg.Version(), runtime.Version()), + CoreProtocolVersionFn: config.CoreProtocolVersionFn, + CoreBuildVersionFn: config.CoreBuildVersionFn, + }, + ) + if err != nil { + cancel() + return nil, errors.Wrap(err, "error creating captive core backend") + } + log.Infof("successfully created ledger backend of type captive core") + case LedgerBackendPrecomputed: + if config.PrecomputedMetaConfig == nil { + cancel() + return nil, errors.New("error creating precomputed buffered backend, precomputed backend config is not present") + } + precompConfig := config.PrecomputedMetaConfig + + dataStore, err := datastore.NewDataStore(ctx, precompConfig.DataStoreConfig) + if err != nil { + cancel() + return nil, errors.Wrapf(err, "error creating datastore from config, %v", precompConfig.DataStoreConfig) + } + + bufferedConfig := ledgerbackend.BufferedStorageBackendConfig{ + LedgerBatchConfig: precompConfig.DataStoreConfig.Schema, + DataStore: dataStore, + BufferSize: 
precompConfig.BufferedBackendConfig.BufferSize, + NumWorkers: precompConfig.BufferedBackendConfig.NumWorkers, + RetryLimit: precompConfig.BufferedBackendConfig.RetryLimit, + RetryWait: precompConfig.BufferedBackendConfig.RetryWait, + } + + if ledgerBackend, err = ledgerbackend.NewBufferedStorageBackend(ctx, bufferedConfig); err != nil { + cancel() + return nil, errors.Wrapf(err, "error creating buffered storage backend, %v", bufferedConfig) + } + log.Infof("successfully created ledger backend of type buffered storage") + default: cancel() - return nil, errors.Wrap(err, "error creating captive core backend") + return nil, errors.Errorf("unsupported ledger backend type %v", config.LedgerMetaBackendType.String()) } historyQ := &history.Q{config.HistorySession.Clone()} From 5d8d64bb08393ba688cbf667072478b3cbc62515 Mon Sep 17 00:00:00 2001 From: urvisavla Date: Fri, 5 Jul 2024 14:25:24 -0700 Subject: [PATCH 04/24] services/horizon: Reingest from precomputed TxMeta (#5375) * services/horizon: Reingest from precomputed TxMeta * udpate config * Fix govet error --- .../ledgerbackend/buffered_storage_backend.go | 18 ++-- .../buffered_storage_backend_test.go | 63 ++++++++----- ingest/ledgerbackend/ledger_buffer.go | 16 ++-- services/horizon/cmd/db.go | 91 +++++++++++++++++-- services/horizon/config.storagebackend.toml | 19 ++++ services/horizon/internal/ingest/main.go | 86 ++++++------------ support/datastore/datastore.go | 3 +- support/datastore/gcs_datastore.go | 16 +++- support/datastore/gcs_test.go | 16 ++-- support/datastore/mocks.go | 5 + 10 files changed, 211 insertions(+), 122 deletions(-) create mode 100644 services/horizon/config.storagebackend.toml diff --git a/ingest/ledgerbackend/buffered_storage_backend.go b/ingest/ledgerbackend/buffered_storage_backend.go index 4a353bfe22..68e03ffcd3 100644 --- a/ingest/ledgerbackend/buffered_storage_backend.go +++ b/ingest/ledgerbackend/buffered_storage_backend.go @@ -18,12 +18,10 @@ import ( var _ LedgerBackend = (*BufferedStorageBackend)(nil) type BufferedStorageBackendConfig struct { - LedgerBatchConfig datastore.DataStoreSchema - DataStore datastore.DataStore - BufferSize uint32 - NumWorkers uint32 - RetryLimit uint32 - RetryWait time.Duration + BufferSize uint32 `toml:"buffer_size"` + NumWorkers uint32 `toml:"num_workers"` + RetryLimit uint32 `toml:"retry_limit"` + RetryWait time.Duration `toml:"retry_wait"` } // BufferedStorageBackend is a ledger backend that reads from a storage service. @@ -45,7 +43,7 @@ type BufferedStorageBackend struct { } // NewBufferedStorageBackend returns a new BufferedStorageBackend instance. 
-func NewBufferedStorageBackend(ctx context.Context, config BufferedStorageBackendConfig) (*BufferedStorageBackend, error) { +func NewBufferedStorageBackend(ctx context.Context, config BufferedStorageBackendConfig, dataStore datastore.DataStore) (*BufferedStorageBackend, error) { if config.BufferSize == 0 { return nil, errors.New("buffer size must be > 0") } @@ -54,17 +52,17 @@ func NewBufferedStorageBackend(ctx context.Context, config BufferedStorageBacken return nil, errors.New("number of workers must be <= BufferSize") } - if config.DataStore == nil { + if dataStore == nil { return nil, errors.New("no DataStore provided") } - if config.LedgerBatchConfig.LedgersPerFile <= 0 { + if dataStore.GetSchema(ctx).LedgersPerFile <= 0 { return nil, errors.New("ledgersPerFile must be > 0") } bsBackend := &BufferedStorageBackend{ config: config, - dataStore: config.DataStore, + dataStore: dataStore, } return bsBackend, nil diff --git a/ingest/ledgerbackend/buffered_storage_backend_test.go b/ingest/ledgerbackend/buffered_storage_backend_test.go index f18329fffa..510cffeabb 100644 --- a/ingest/ledgerbackend/buffered_storage_backend_test.go +++ b/ingest/ledgerbackend/buffered_storage_backend_test.go @@ -44,29 +44,21 @@ func createBufferedStorageBackendConfigForTesting() BufferedStorageBackendConfig param := make(map[string]string) param["destination_bucket_path"] = "testURL" - ledgerBatchConfig := datastore.DataStoreSchema{ - LedgersPerFile: 1, - FilesPerPartition: 64000, - } - - dataStore := new(datastore.MockDataStore) - return BufferedStorageBackendConfig{ - LedgerBatchConfig: ledgerBatchConfig, - DataStore: dataStore, - BufferSize: 100, - NumWorkers: 5, - RetryLimit: 3, - RetryWait: time.Microsecond, + BufferSize: 100, + NumWorkers: 5, + RetryLimit: 3, + RetryWait: time.Microsecond, } } func createBufferedStorageBackendForTesting() BufferedStorageBackend { config := createBufferedStorageBackendConfigForTesting() + dataStore := new(datastore.MockDataStore) return BufferedStorageBackend{ config: config, - dataStore: config.DataStore, + dataStore: dataStore, } } @@ -86,6 +78,10 @@ func createMockdataStore(t *testing.T, start, end, partitionSize, count uint32) } mockDataStore.On("GetFile", mock.Anything, objectName).Return(readCloser, nil) } + mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + LedgersPerFile: count, + FilesPerPartition: partitionSize, + }) t.Cleanup(func() { mockDataStore.AssertExpectations(t) @@ -128,13 +124,17 @@ func createLCMBatchReader(start, end, count uint32) io.ReadCloser { func TestNewBufferedStorageBackend(t *testing.T) { ctx := context.Background() config := createBufferedStorageBackendConfigForTesting() - - bsb, err := NewBufferedStorageBackend(ctx, config) + mockDataStore := new(datastore.MockDataStore) + mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + LedgersPerFile: uint32(1), + FilesPerPartition: partitionSize, + }) + bsb, err := NewBufferedStorageBackend(ctx, config, mockDataStore) assert.NoError(t, err) - assert.Equal(t, bsb.dataStore, config.DataStore) - assert.Equal(t, uint32(1), bsb.config.LedgerBatchConfig.LedgersPerFile) - assert.Equal(t, uint32(64000), bsb.config.LedgerBatchConfig.FilesPerPartition) + assert.Equal(t, bsb.dataStore, mockDataStore) + assert.Equal(t, uint32(1), bsb.dataStore.GetSchema(ctx).LedgersPerFile) + assert.Equal(t, uint32(64000), bsb.dataStore.GetSchema(ctx).FilesPerPartition) assert.Equal(t, uint32(100), bsb.config.BufferSize) assert.Equal(t, uint32(5), bsb.config.NumWorkers) 
assert.Equal(t, uint32(3), bsb.config.RetryLimit) @@ -210,12 +210,14 @@ func TestCloudStorageGetLedger_MultipleLedgerPerFile(t *testing.T) { lcmArray := createLCMForTesting(startLedger, endLedger) bsb := createBufferedStorageBackendForTesting() ctx := context.Background() - bsb.config.LedgerBatchConfig.LedgersPerFile = uint32(2) ledgerRange := BoundedRange(startLedger, endLedger) mockDataStore := createMockdataStore(t, startLedger, endLedger, partitionSize, 2) bsb.dataStore = mockDataStore - + mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + LedgersPerFile: uint32(2), + FilesPerPartition: partitionSize, + }) assert.NoError(t, bsb.PrepareRange(ctx, ledgerRange)) assert.Eventually(t, func() bool { return len(bsb.ledgerBuffer.ledgerQueue) == 2 }, time.Second*5, time.Millisecond*50) @@ -451,6 +453,10 @@ func TestLedgerBufferClose(t *testing.T) { mockDataStore := new(datastore.MockDataStore) partition := ledgerPerFileCount*partitionSize - 1 + mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + LedgersPerFile: ledgerPerFileCount, + FilesPerPartition: partitionSize, + }) objectName := fmt.Sprintf("FFFFFFFF--0-%d/%08X--%d.xdr.zstd", partition, math.MaxUint32-3, 3) afterPrepareRange := make(chan struct{}) @@ -483,7 +489,10 @@ func TestLedgerBufferBoundedObjectNotFound(t *testing.T) { mockDataStore := new(datastore.MockDataStore) partition := ledgerPerFileCount*partitionSize - 1 - + mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + LedgersPerFile: ledgerPerFileCount, + FilesPerPartition: partitionSize, + }) objectName := fmt.Sprintf("FFFFFFFF--0-%d/%08X--%d.xdr.zstd", partition, math.MaxUint32-3, 3) mockDataStore.On("GetFile", mock.Anything, objectName).Return(io.NopCloser(&bytes.Buffer{}), os.ErrNotExist).Once() t.Cleanup(func() { @@ -509,7 +518,10 @@ func TestLedgerBufferUnboundedObjectNotFound(t *testing.T) { mockDataStore := new(datastore.MockDataStore) partition := ledgerPerFileCount*partitionSize - 1 - + mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + LedgersPerFile: ledgerPerFileCount, + FilesPerPartition: partitionSize, + }) objectName := fmt.Sprintf("FFFFFFFF--0-%d/%08X--%d.xdr.zstd", partition, math.MaxUint32-3, 3) iteration := &atomic.Int32{} cancelAfter := int32(bsb.config.RetryLimit) + 2 @@ -551,7 +563,10 @@ func TestLedgerBufferRetryLimit(t *testing.T) { }) bsb.dataStore = mockDataStore - + mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + LedgersPerFile: ledgerPerFileCount, + FilesPerPartition: partitionSize, + }) assert.NoError(t, bsb.PrepareRange(context.Background(), ledgerRange)) bsb.ledgerBuffer.wg.Wait() diff --git a/ingest/ledgerbackend/ledger_buffer.go b/ingest/ledgerbackend/ledger_buffer.go index 5b2ec57ffc..3fea296b20 100644 --- a/ingest/ledgerbackend/ledger_buffer.go +++ b/ingest/ledgerbackend/ledger_buffer.go @@ -83,19 +83,19 @@ func (bsb *BufferedStorageBackend) newLedgerBuffer(ledgerRange Range) (*ledgerBu // but for easier conceptualization, len(taskQueue) can be interpreted as both pending and in-flight tasks // where we assume the workers are empty and not processing any tasks. 
for i := 0; i <= int(bsb.config.BufferSize); i++ { - ledgerBuffer.pushTaskQueue() + ledgerBuffer.pushTaskQueue(ctx) } return ledgerBuffer, nil } -func (lb *ledgerBuffer) pushTaskQueue() { +func (lb *ledgerBuffer) pushTaskQueue(ctx context.Context) { // In bounded mode, don't queue past the end ledger if lb.nextTaskLedger > lb.ledgerRange.to && lb.ledgerRange.bounded { return } lb.taskQueue <- lb.nextTaskLedger - lb.nextTaskLedger += lb.config.LedgerBatchConfig.LedgersPerFile + lb.nextTaskLedger += lb.dataStore.GetSchema(ctx).LedgersPerFile } // sleepWithContext returns true upon sleeping without interruption from the context @@ -155,7 +155,7 @@ func (lb *ledgerBuffer) worker(ctx context.Context) { // Thus, the number of tasks decreases by 1 and the priority queue length increases by 1. // This keeps the overall total the same (<= BufferSize). As long as the the ledger buffer invariant // was maintained in the previous state, it is still maintained during this state transition. - lb.storeObject(ledgerObject, sequence) + lb.storeObject(ctx, ledgerObject, sequence) break } } @@ -163,7 +163,7 @@ func (lb *ledgerBuffer) worker(ctx context.Context) { } func (lb *ledgerBuffer) downloadLedgerObject(ctx context.Context, sequence uint32) ([]byte, error) { - objectKey := lb.config.LedgerBatchConfig.GetObjectKeyFromSequenceNumber(sequence) + objectKey := lb.dataStore.GetSchema(ctx).GetObjectKeyFromSequenceNumber(sequence) reader, err := lb.dataStore.GetFile(ctx, objectKey) if err != nil { @@ -180,7 +180,7 @@ func (lb *ledgerBuffer) downloadLedgerObject(ctx context.Context, sequence uint3 return objectBytes, nil } -func (lb *ledgerBuffer) storeObject(ledgerObject []byte, sequence uint32) { +func (lb *ledgerBuffer) storeObject(ctx context.Context, ledgerObject []byte, sequence uint32) { lb.priorityQueueLock.Lock() defer lb.priorityQueueLock.Unlock() @@ -198,7 +198,7 @@ func (lb *ledgerBuffer) storeObject(ledgerObject []byte, sequence uint32) { for lb.ledgerPriorityQueue.Len() > 0 && lb.currentLedger == uint32(lb.ledgerPriorityQueue.Peek().startLedger) { item := lb.ledgerPriorityQueue.Pop() lb.ledgerQueue <- item.payload - lb.currentLedger += lb.config.LedgerBatchConfig.LedgersPerFile + lb.currentLedger += lb.dataStore.GetSchema(ctx).LedgersPerFile } } @@ -215,7 +215,7 @@ func (lb *ledgerBuffer) getFromLedgerQueue(ctx context.Context) (xdr.LedgerClose // Thus len(ledgerQueue) decreases by 1 and the number of tasks increases by 1. 
// The overall sum below remains the same: // len(taskQueue) + len(ledgerQueue) + ledgerPriorityQueue.Len() <= bsb.config.BufferSize - lb.pushTaskQueue() + lb.pushTaskQueue(ctx) lcmBatch := xdr.LedgerCloseMetaBatch{} decoder := compressxdr.NewXDRDecoder(compressxdr.DefaultCompressor, &lcmBatch) diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go index e2589a7385..97144267ce 100644 --- a/services/horizon/cmd/db.go +++ b/services/horizon/cmd/db.go @@ -11,14 +11,17 @@ import ( "strconv" "strings" + "github.com/pelletier/go-toml" "github.com/spf13/cobra" "github.com/spf13/viper" + "github.com/stellar/go/ingest/ledgerbackend" horizon "github.com/stellar/go/services/horizon/internal" "github.com/stellar/go/services/horizon/internal/db2/history" "github.com/stellar/go/services/horizon/internal/db2/schema" "github.com/stellar/go/services/horizon/internal/ingest" support "github.com/stellar/go/support/config" + "github.com/stellar/go/support/datastore" "github.com/stellar/go/support/db" "github.com/stellar/go/support/errors" hlog "github.com/stellar/go/support/log" @@ -257,13 +260,21 @@ var dbReingestCmd = &cobra.Command{ } var ( - reingestForce bool - parallelWorkers uint - parallelJobSize uint32 - retries uint - retryBackoffSeconds uint + reingestForce bool + parallelWorkers uint + parallelJobSize uint32 + retries uint + retryBackoffSeconds uint + ledgerBackendStr string + storageBackendConfigPath string + ledgerBackendType ingest.LedgerBackendType ) +type StorageBackendConfig struct { + DataStoreConfig datastore.DataStoreConfig `toml:"datastore_config"` + BufferedStorageBackendConfig ledgerbackend.BufferedStorageBackendConfig `toml:"buffered_storage_backend_config"` +} + func ingestRangeCmdOpts() support.ConfigOptions { return support.ConfigOptions{ { @@ -307,6 +318,42 @@ func ingestRangeCmdOpts() support.ConfigOptions { FlagDefault: uint(5), Usage: "[optional] backoff seconds between reingest retries", }, + { + Name: "ledgerbackend", + ConfigKey: &ledgerBackendStr, + OptType: types.String, + Required: false, + FlagDefault: ingest.CaptiveCoreBackend.String(), + Usage: "[optional] Specify the ledger backend type: 'captive-core' (default) or 'datastore'", + CustomSetValue: func(co *support.ConfigOption) error { + val := viper.GetString(co.Name) + switch val { + case ingest.CaptiveCoreBackend.String(): + ledgerBackendType = ingest.CaptiveCoreBackend + case ingest.BufferedStorageBackend.String(): + ledgerBackendType = ingest.BufferedStorageBackend + default: + return fmt.Errorf("invalid ledger backend: %s, must be 'captive-core' or 'datastore'", val) + } + *co.ConfigKey.(*string) = val + return nil + }, + }, + { + Name: "datastore-config", + ConfigKey: &storageBackendConfigPath, + OptType: types.String, + Required: false, + Usage: "[optional] Specify the path to the datastore config file (required for datastore backend)", + CustomSetValue: func(co *support.ConfigOption) error { + val := viper.GetString(co.Name) + if ledgerBackendType == ingest.BufferedStorageBackend && val == "" { + return errors.New("datastore config file is required for datastore backend type") + } + *co.ConfigKey.(*string) = val + return nil + }, + }, } } @@ -337,7 +384,18 @@ var dbReingestRangeCmd = &cobra.Command{ } } - err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: true}) + var storageBackendConfig StorageBackendConfig + if ledgerBackendType == ingest.BufferedStorageBackend { + cfg, err := toml.LoadFile(storageBackendConfigPath) + if 
err != nil { + return fmt.Errorf("failed to load config file %v: %w", storageBackendConfigPath, err) + } + if err = cfg.Unmarshal(&storageBackendConfig); err != nil { + return fmt.Errorf("error unmarshalling TOML config: %w", err) + } + } + + err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: false}) if err != nil { return err } @@ -346,6 +404,7 @@ var dbReingestRangeCmd = &cobra.Command{ reingestForce, parallelWorkers, *globalConfig, + storageBackendConfig, ) }, } @@ -385,7 +444,18 @@ var dbFillGapsCmd = &cobra.Command{ withRange = true } - err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: true}) + var storageBackendConfig StorageBackendConfig + if ledgerBackendType == ingest.BufferedStorageBackend { + cfg, err := toml.LoadFile(storageBackendConfigPath) + if err != nil { + return fmt.Errorf("failed to load config file %v: %w", storageBackendConfigPath, err) + } + if err = cfg.Unmarshal(&storageBackendConfig); err != nil { + return fmt.Errorf("error unmarshalling TOML config: %w", err) + } + } + + err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: false}) if err != nil { return err } @@ -404,11 +474,11 @@ var dbFillGapsCmd = &cobra.Command{ hlog.Infof("found gaps %v", gaps) } - return runDBReingestRange(gaps, reingestForce, parallelWorkers, *globalConfig) + return runDBReingestRange(gaps, reingestForce, parallelWorkers, *globalConfig, storageBackendConfig) }, } -func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, parallelWorkers uint, config horizon.Config) error { +func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, parallelWorkers uint, config horizon.Config, storageBackendConfig StorageBackendConfig) error { var err error if reingestForce && parallelWorkers > 1 { @@ -435,6 +505,9 @@ func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, RoundingSlippageFilter: config.RoundingSlippageFilter, MaxLedgerPerFlush: maxLedgersPerFlush, SkipTxmeta: config.SkipTxmeta, + LedgerBackendType: ledgerBackendType, + DataStoreConfig: storageBackendConfig.DataStoreConfig, + BufferedBackendConfig: storageBackendConfig.BufferedStorageBackendConfig, } if ingestConfig.HistorySession, err = db.Open("postgres", config.DatabaseURL); err != nil { diff --git a/services/horizon/config.storagebackend.toml b/services/horizon/config.storagebackend.toml new file mode 100644 index 0000000000..538b793b54 --- /dev/null +++ b/services/horizon/config.storagebackend.toml @@ -0,0 +1,19 @@ +[buffered_storage_backend_config] +buffer_size = 5 # The size of the buffer +num_workers = 5 # Number of workers +retry_limit = 3 # Number of retries allowed +retry_wait = "30s" # Duration to wait before retrying in seconds + +# Datastore Configuration +[datastore_config] +# Specifies the type of datastore. Currently, only Google Cloud Storage (GCS) is supported. +type = "GCS" + +[datastore_config.params] +# The Google Cloud Storage bucket path for storing data, with optional subpaths for organization. +destination_bucket_path = "path/to/my/bucket" + +[datastore_config.schema] +# Configuration for data organization +ledgers_per_file = 1 # Number of ledgers stored in each file. +files_per_partition = 64000 # Number of files per partition/directory. 
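For reference, a minimal sketch (not part of the patch) of how this file is consumed, mirroring the toml.LoadFile/Unmarshal flow added to services/horizon/cmd/db.go in this patch; the file path is illustrative and error handling is reduced to panics:

package main

import (
	"fmt"

	"github.com/pelletier/go-toml"
	"github.com/stellar/go/ingest/ledgerbackend"
	"github.com/stellar/go/support/datastore"
)

// StorageBackendConfig matches the struct and TOML tags introduced in this patch.
type StorageBackendConfig struct {
	DataStoreConfig              datastore.DataStoreConfig                  `toml:"datastore_config"`
	BufferedStorageBackendConfig ledgerbackend.BufferedStorageBackendConfig `toml:"buffered_storage_backend_config"`
}

func main() {
	tree, err := toml.LoadFile("services/horizon/config.storagebackend.toml")
	if err != nil {
		panic(err)
	}
	var cfg StorageBackendConfig
	if err := tree.Unmarshal(&cfg); err != nil {
		panic(err)
	}
	// With the file above this prints: 5 GCS 1
	fmt.Println(cfg.BufferedStorageBackendConfig.BufferSize,
		cfg.DataStoreConfig.Type,
		cfg.DataStoreConfig.Schema.LedgersPerFile)
}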
diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index e5f5cc1e95..a70a31fdf9 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -83,38 +83,24 @@ const ( var log = logpkg.DefaultLogger.WithField("service", "ingest") -type LedgerMetaBackendType int64 +type LedgerBackendType uint const ( - LedgerBackendCaptiveCore LedgerMetaBackendType = iota - LedgerBackendPrecomputed + CaptiveCoreBackend LedgerBackendType = iota + BufferedStorageBackend ) -func (s LedgerMetaBackendType) String() string { +func (s LedgerBackendType) String() string { switch s { - case LedgerBackendCaptiveCore: - return "captive core" - case LedgerBackendPrecomputed: - return "precomputed" - default: - return "" + case CaptiveCoreBackend: + return "captive-core" + case BufferedStorageBackend: + return "datastore" } -} - -type BufferedBackendConfig struct { - BufferSize uint32 `toml:"size"` - NumWorkers uint32 `toml:"num_workers"` - RetryLimit uint32 `toml:"retry_limit"` - RetryWait time.Duration `toml:"retry_wait"` -} - -type PrecomputedLedgerMetaConfig struct { - DataStoreConfig datastore.DataStoreConfig `toml:"datastore_config"` - BufferedBackendConfig BufferedBackendConfig `toml:"buffered_backend_config"` + return "" } type Config struct { - LedgerMetaBackendType LedgerMetaBackendType StellarCoreURL string CaptiveCoreBinaryPath string CaptiveCoreStoragePath string @@ -148,7 +134,9 @@ type Config struct { ReapConfig ReapConfig - PrecomputedMetaConfig *PrecomputedLedgerMetaConfig + LedgerBackendType LedgerBackendType + DataStoreConfig datastore.DataStoreConfig + BufferedBackendConfig ledgerbackend.BufferedStorageBackendConfig } const ( @@ -296,11 +284,24 @@ func NewSystem(config Config) (System, error) { cancel() return nil, errors.Wrap(err, "error creating history archive") } - var ledgerBackend ledgerbackend.LedgerBackend - switch config.LedgerMetaBackendType { - case LedgerBackendCaptiveCore: + if config.LedgerBackendType == BufferedStorageBackend { + // Ingest from datastore + var dataStore datastore.DataStore + dataStore, err = datastore.NewDataStore(context.Background(), config.DataStoreConfig) + if err != nil { + cancel() + return nil, fmt.Errorf("failed to create datastore: %w", err) + } + ledgerBackend, err = ledgerbackend.NewBufferedStorageBackend(ctx, config.BufferedBackendConfig, dataStore) + if err != nil { + cancel() + return nil, fmt.Errorf("failed to create buffered storage backend: %w", err) + } + } else { + // Ingest from local captive core + logger := log.WithField("subservice", "stellar-core") ledgerBackend, err = ledgerbackend.NewCaptive( ledgerbackend.CaptiveCoreConfig{ @@ -323,37 +324,6 @@ func NewSystem(config Config) (System, error) { cancel() return nil, errors.Wrap(err, "error creating captive core backend") } - log.Infof("successfully created ledger backend of type captive core") - case LedgerBackendPrecomputed: - if config.PrecomputedMetaConfig == nil { - cancel() - return nil, errors.New("error creating precomputed buffered backend, precomputed backend config is not present") - } - precompConfig := config.PrecomputedMetaConfig - - dataStore, err := datastore.NewDataStore(ctx, precompConfig.DataStoreConfig) - if err != nil { - cancel() - return nil, errors.Wrapf(err, "error creating datastore from config, %v", precompConfig.DataStoreConfig) - } - - bufferedConfig := ledgerbackend.BufferedStorageBackendConfig{ - LedgerBatchConfig: precompConfig.DataStoreConfig.Schema, - DataStore: dataStore, - 
BufferSize: precompConfig.BufferedBackendConfig.BufferSize, - NumWorkers: precompConfig.BufferedBackendConfig.NumWorkers, - RetryLimit: precompConfig.BufferedBackendConfig.RetryLimit, - RetryWait: precompConfig.BufferedBackendConfig.RetryWait, - } - - if ledgerBackend, err = ledgerbackend.NewBufferedStorageBackend(ctx, bufferedConfig); err != nil { - cancel() - return nil, errors.Wrapf(err, "error creating buffered storage backend, %v", bufferedConfig) - } - log.Infof("successfully created ledger backend of type buffered storage") - default: - cancel() - return nil, errors.Errorf("unsupported ledger backend type %v", config.LedgerMetaBackendType.String()) } historyQ := &history.Q{config.HistorySession.Clone()} diff --git a/support/datastore/datastore.go b/support/datastore/datastore.go index e7e999345d..e4f41920e8 100644 --- a/support/datastore/datastore.go +++ b/support/datastore/datastore.go @@ -21,6 +21,7 @@ type DataStore interface { PutFileIfNotExists(ctx context.Context, path string, in io.WriterTo, metaData map[string]string) (bool, error) Exists(ctx context.Context, path string) (bool, error) Size(ctx context.Context, path string) (int64, error) + GetSchema(ctx context.Context) DataStoreSchema Close() error } @@ -32,7 +33,7 @@ func NewDataStore(ctx context.Context, datastoreConfig DataStoreConfig) (DataSto if !ok { return nil, errors.Errorf("Invalid GCS config, no destination_bucket_path") } - return NewGCSDataStore(ctx, destinationBucketPath) + return NewGCSDataStore(ctx, destinationBucketPath, datastoreConfig.Schema) default: return nil, errors.Errorf("Invalid datastore type %v, not supported", datastoreConfig.Type) } diff --git a/support/datastore/gcs_datastore.go b/support/datastore/gcs_datastore.go index cdedea086d..3cf48b3fcb 100644 --- a/support/datastore/gcs_datastore.go +++ b/support/datastore/gcs_datastore.go @@ -24,18 +24,19 @@ type GCSDataStore struct { client *storage.Client bucket *storage.BucketHandle prefix string + schema DataStoreSchema } -func NewGCSDataStore(ctx context.Context, bucketPath string) (DataStore, error) { +func NewGCSDataStore(ctx context.Context, bucketPath string, schema DataStoreSchema) (DataStore, error) { client, err := storage.NewClient(ctx) if err != nil { return nil, err } - return FromGCSClient(ctx, client, bucketPath) + return FromGCSClient(ctx, client, bucketPath, schema) } -func FromGCSClient(ctx context.Context, client *storage.Client, bucketPath string) (DataStore, error) { +func FromGCSClient(ctx context.Context, client *storage.Client, bucketPath string, schema DataStoreSchema) (DataStore, error) { // append the gcs:// scheme to enable usage of the url package reliably to // get parse bucket name which is first path segment as URL.Host gcsBucketURL := fmt.Sprintf("gcs://%s", bucketPath) @@ -55,7 +56,8 @@ func FromGCSClient(ctx context.Context, client *storage.Client, bucketPath strin return nil, fmt.Errorf("failed to retrieve bucket attributes: %w", err) } - return &GCSDataStore{client: client, bucket: bucket, prefix: prefix}, nil + // TODO: Datastore schema to be fetched from the datastore https://stellarorg.atlassian.net/browse/HUBBLE-397 + return &GCSDataStore{client: client, bucket: bucket, prefix: prefix, schema: schema}, nil } // GetFileMetadata retrieves the metadata for the specified file in the GCS bucket. 
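For context, a small sketch (hypothetical caller, not in the patch) of how the relocated schema is used by the ledger buffer changes above: the object key for a ledger sequence now comes from the datastore's schema rather than from the backend's own config.

// resolveKey mirrors the lookup in ledger_buffer.go's downloadLedgerObject.
func resolveKey(ctx context.Context, ds datastore.DataStore, sequence uint32) string {
	return ds.GetSchema(ctx).GetObjectKeyFromSequenceNumber(sequence)
}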
@@ -177,3 +179,9 @@ func (b GCSDataStore) putFile(ctx context.Context, filePath string, in io.Writer } return w.Close() } + +// GetSchema returns the schema information which defines the structure +// and organization of data in the datastore. +func (b GCSDataStore) GetSchema(ctx context.Context) DataStoreSchema { + return b.schema +} diff --git a/support/datastore/gcs_test.go b/support/datastore/gcs_test.go index 8838e8dadb..618b5d602a 100644 --- a/support/datastore/gcs_test.go +++ b/support/datastore/gcs_test.go @@ -24,7 +24,7 @@ func TestGCSExists(t *testing.T) { }) defer server.Stop() - store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet") + store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet", DataStoreSchema{}) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, store.Close()) @@ -52,7 +52,7 @@ func TestGCSSize(t *testing.T) { }) defer server.Stop() - store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet") + store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet", DataStoreSchema{}) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, store.Close()) @@ -86,7 +86,7 @@ func TestGCSPutFile(t *testing.T) { DefaultEventBasedHold: false, }) - store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet") + store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet", DataStoreSchema{}) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, store.Close()) @@ -138,7 +138,7 @@ func TestGCSPutFileIfNotExists(t *testing.T) { }) defer server.Stop() - store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet") + store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet", DataStoreSchema{}) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, store.Close()) @@ -187,7 +187,7 @@ func TestGCSPutFileWithMetadata(t *testing.T) { DefaultEventBasedHold: false, }) - store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet") + store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet", DataStoreSchema{}) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, store.Close()) @@ -255,7 +255,7 @@ func TestGCSPutFileIfNotExistsWithMetadata(t *testing.T) { }) defer server.Stop() - store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet") + store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet", DataStoreSchema{}) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, store.Close()) @@ -323,7 +323,7 @@ func TestGCSGetNonExistentFile(t *testing.T) { }) defer server.Stop() - store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet") + store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet", DataStoreSchema{}) require.NoError(t, err) t.Cleanup(func() { require.NoError(t, store.Close()) @@ -365,7 +365,7 @@ func TestGCSGetFileValidatesCRC32C(t *testing.T) { }) defer server.Stop() - store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet") + store, err := FromGCSClient(context.Background(), server.Client(), "test-bucket/objects/testnet", DataStoreSchema{}) require.NoError(t, err) 
t.Cleanup(func() { require.NoError(t, store.Close()) diff --git a/support/datastore/mocks.go b/support/datastore/mocks.go index 96c15c1371..6b505e6478 100644 --- a/support/datastore/mocks.go +++ b/support/datastore/mocks.go @@ -47,6 +47,11 @@ func (m *MockDataStore) Close() error { return args.Error(0) } +func (m *MockDataStore) GetSchema(ctx context.Context) DataStoreSchema { + args := m.Called(ctx) + return args.Get(0).(DataStoreSchema) +} + type MockResumableManager struct { mock.Mock } From b37be69688e0595cf5111a2542dc671c54289991 Mon Sep 17 00:00:00 2001 From: shawn Date: Mon, 8 Jul 2024 17:13:58 -0700 Subject: [PATCH 05/24] #4911: add unit test coverage on buffered backend ingest system creation (#5377) --- go.mod | 36 ++++---- go.sum | 88 +++++++++---------- .../ledgerbackend/buffered_storage_backend.go | 2 + services/horizon/cmd/db.go | 18 ++-- services/horizon/internal/ingest/main.go | 16 ++-- services/horizon/internal/ingest/main_test.go | 27 ++++++ support/datastore/datastore.go | 2 + 7 files changed, 112 insertions(+), 77 deletions(-) diff --git a/go.mod b/go.mod index 9e1caca7aa..623399e969 100644 --- a/go.mod +++ b/go.mod @@ -6,7 +6,7 @@ toolchain go1.22.1 require ( cloud.google.com/go/firestore v1.15.0 // indirect - cloud.google.com/go/storage v1.40.0 + cloud.google.com/go/storage v1.41.0 firebase.google.com/go v3.12.0+incompatible github.com/2opremio/pretty v0.2.2-0.20230601220618-e1d5758b2a95 github.com/BurntSushi/toml v1.3.2 @@ -52,7 +52,7 @@ require ( github.com/stretchr/testify v1.9.0 github.com/tyler-smith/go-bip39 v0.0.0-20180618194314-52158e4697b8 github.com/xdrpp/goxdr v0.1.1 - google.golang.org/api v0.177.0 + google.golang.org/api v0.183.0 gopkg.in/gavv/httpexpect.v1 v1.0.0-20170111145843-40724cf1e4a0 gopkg.in/square/go-jose.v2 v2.4.1 gopkg.in/tylerb/graceful.v1 v1.2.15 @@ -60,16 +60,16 @@ require ( require ( github.com/cenkalti/backoff/v4 v4.2.1 - github.com/fsouza/fake-gcs-server v1.49.0 + github.com/fsouza/fake-gcs-server v1.49.2 ) require ( - cloud.google.com/go/auth v0.3.0 // indirect + cloud.google.com/go/auth v0.5.1 // indirect cloud.google.com/go/auth/oauth2adapt v0.2.2 // indirect cloud.google.com/go/compute/metadata v0.3.0 // indirect cloud.google.com/go/iam v1.1.8 // indirect - cloud.google.com/go/longrunning v0.5.6 // indirect - cloud.google.com/go/pubsub v1.37.0 // indirect + cloud.google.com/go/longrunning v0.5.7 // indirect + cloud.google.com/go/pubsub v1.38.0 // indirect github.com/andybalholm/brotli v1.0.4 // indirect github.com/certifi/gocertifi v0.0.0-20210507211836-431795d63e8d // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect @@ -105,15 +105,15 @@ require ( golang.org/x/mod v0.13.0 // indirect golang.org/x/sync v0.7.0 // indirect golang.org/x/tools v0.14.0 // indirect - google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6 // indirect - google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 // indirect + google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e // indirect + google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect gopkg.in/djherbis/atime.v1 v1.0.0 // indirect gopkg.in/djherbis/stream.v1 v1.3.1 // indirect gopkg.in/ini.v1 v1.67.0 // indirect ) require ( - cloud.google.com/go v0.112.2 // indirect + cloud.google.com/go v0.114.0 // indirect github.com/ajg/form v0.0.0-20160822230020-523a5da1a92f // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/buger/goreplay v1.3.2 @@ -124,7 +124,7 @@ require ( 
github.com/golang/protobuf v1.5.4 // indirect github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-querystring v0.0.0-20160401233042-9235644dd9e5 // indirect - github.com/googleapis/gax-go/v2 v2.12.3 // indirect + github.com/googleapis/gax-go/v2 v2.12.4 // indirect github.com/hashicorp/golang-lru v1.0.2 github.com/imkira/go-interpol v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect @@ -154,17 +154,17 @@ require ( github.com/yudai/golcs v0.0.0-20150405163532-d1c525dea8ce // indirect github.com/yudai/pp v2.0.1+incompatible // indirect go.opencensus.io v0.24.0 // indirect - golang.org/x/crypto v0.22.0 // indirect + golang.org/x/crypto v0.23.0 // indirect golang.org/x/exp v0.0.0-20231006140011-7918f672742d - golang.org/x/net v0.24.0 // indirect - golang.org/x/oauth2 v0.20.0 // indirect - golang.org/x/sys v0.19.0 // indirect - golang.org/x/term v0.19.0 // indirect - golang.org/x/text v0.14.0 // indirect + golang.org/x/net v0.25.0 // indirect + golang.org/x/oauth2 v0.21.0 // indirect + golang.org/x/sys v0.20.0 // indirect + golang.org/x/term v0.20.0 // indirect + golang.org/x/text v0.15.0 // indirect golang.org/x/time v0.5.0 google.golang.org/appengine v1.6.8 // indirect - google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda // indirect - google.golang.org/grpc v1.63.2 // indirect + google.golang.org/genproto v0.0.0-20240528184218-531527333157 // indirect + google.golang.org/grpc v1.64.0 // indirect google.golang.org/protobuf v1.34.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect diff --git a/go.sum b/go.sum index ab95f599dd..af91329d53 100644 --- a/go.sum +++ b/go.sum @@ -17,10 +17,10 @@ cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHOb cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI= cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk= cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY= -cloud.google.com/go v0.112.2 h1:ZaGT6LiG7dBzi6zNOvVZwacaXlmf3lRqnC4DQzqyRQw= -cloud.google.com/go v0.112.2/go.mod h1:iEqjp//KquGIJV/m+Pk3xecgKNhV+ry+vVTsy4TbDms= -cloud.google.com/go/auth v0.3.0 h1:PRyzEpGfx/Z9e8+lHsbkoUVXD0gnu4MNmm7Gp8TQNIs= -cloud.google.com/go/auth v0.3.0/go.mod h1:lBv6NKTWp8E3LPzmO1TbiiRKc4drLOfHsgmlH9ogv5w= +cloud.google.com/go v0.114.0 h1:OIPFAdfrFDFO2ve2U7r/H5SwSbBzEdrBdE7xkgwc+kY= +cloud.google.com/go v0.114.0/go.mod h1:ZV9La5YYxctro1HTPug5lXH/GefROyW8PPD4T8n9J8E= +cloud.google.com/go/auth v0.5.1 h1:0QNO7VThG54LUzKiQxv8C6x1YX7lUrzlAa1nVLF8CIw= +cloud.google.com/go/auth v0.5.1/go.mod h1:vbZT8GjzDf3AVqCcQmqeeM32U9HBFc32vVVAbwDsa6s= cloud.google.com/go/auth/oauth2adapt v0.2.2 h1:+TTV8aXpjeChS9M+aTtN/TjdQnzJvmzKFt//oWu7HX4= cloud.google.com/go/auth/oauth2adapt v0.2.2/go.mod h1:wcYjgpZI9+Yu7LyYBg4pqSiaRkfEK3GQcpb7C/uyF1Q= cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o= @@ -37,24 +37,24 @@ cloud.google.com/go/firestore v1.15.0 h1:/k8ppuWOtNuDHt2tsRV42yI21uaGnKDEQnRFeBp cloud.google.com/go/firestore v1.15.0/go.mod h1:GWOxFXcv8GZUtYpWHw/w6IuYNux/BtmeVTMmjrm4yhk= cloud.google.com/go/iam v1.1.8 h1:r7umDwhj+BQyz0ScZMp4QrGXjSTI3ZINnpgU2nlB/K0= cloud.google.com/go/iam v1.1.8/go.mod h1:GvE6lyMmfxXauzNq8NbgJbeVQNspG+tcdL/W8QO1+zE= -cloud.google.com/go/kms v1.15.8 h1:szIeDCowID8th2i8XE4uRev5PMxQFqW+JjwYxL9h6xs= -cloud.google.com/go/kms v1.15.8/go.mod h1:WoUHcDjD9pluCg7pNds131awnH429QGvRM3N/4MyoVs= 
-cloud.google.com/go/longrunning v0.5.6 h1:xAe8+0YaWoCKr9t1+aWe+OeQgN/iJK1fEgZSXmjuEaE= -cloud.google.com/go/longrunning v0.5.6/go.mod h1:vUaDrWYOMKRuhiv6JBnn49YxCPz2Ayn9GqyjaBT8/mA= +cloud.google.com/go/kms v1.17.1 h1:5k0wXqkxL+YcXd4viQzTqCgzzVKKxzgrK+rCZJytEQs= +cloud.google.com/go/kms v1.17.1/go.mod h1:DCMnCF/apA6fZk5Cj4XsD979OyHAqFasPuA5Sd0kGlQ= +cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU= +cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng= cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I= cloud.google.com/go/pubsub v1.1.0/go.mod h1:EwwdRX2sKPjnvnqCa270oGRyludottCI76h+R3AArQw= cloud.google.com/go/pubsub v1.2.0/go.mod h1:jhfEVHT8odbXTkndysNHCcx0awwzvfOlguIAii9o8iA= cloud.google.com/go/pubsub v1.3.1/go.mod h1:i+ucay31+CNRpDW4Lu78I4xXG+O1r/MAHgjpRVR+TSU= -cloud.google.com/go/pubsub v1.37.0 h1:0uEEfaB1VIJzabPpwpZf44zWAKAme3zwKKxHk7vJQxQ= -cloud.google.com/go/pubsub v1.37.0/go.mod h1:YQOQr1uiUM092EXwKs56OPT650nwnawc+8/IjoUeGzQ= +cloud.google.com/go/pubsub v1.38.0 h1:J1OT7h51ifATIedjqk/uBNPh+1hkvUaH4VKbz4UuAsc= +cloud.google.com/go/pubsub v1.38.0/go.mod h1:IPMJSWSus/cu57UyR01Jqa/bNOQA+XnPF6Z4dKW4fAA= cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiyrjsg+URw= cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos= cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk= cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs= cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0= cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo= -cloud.google.com/go/storage v1.40.0 h1:VEpDQV5CJxFmJ6ueWNsKxcr1QAYOXEgxDa+sBbJahPw= -cloud.google.com/go/storage v1.40.0/go.mod h1:Rrj7/hKlG87BLqDJYtwR0fbPld8uJPbQ2ucUMY7Ir0g= +cloud.google.com/go/storage v1.41.0 h1:RusiwatSu6lHeEXe3kglxakAmAbfV+rhtPqA6i8RBx0= +cloud.google.com/go/storage v1.41.0/go.mod h1:J1WCa/Z2FcgdEDuPUY8DxT5I+d9mFKsCepp5vR6Sq80= dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU= firebase.google.com/go v3.12.0+incompatible h1:q70KCp/J0oOL8kJ8oV2j3646kV4TB8Y5IvxXC0WT1bo= firebase.google.com/go v3.12.0+incompatible/go.mod h1:xlah6XbEyW6tbfSklcfe5FHJIwjt8toICdV5Wh9ptHs= @@ -140,8 +140,8 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY= github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw= -github.com/fsouza/fake-gcs-server v1.49.0 h1:4x1RxKuqoqhZrXogtj5nInQnIjQylxld43tKrkPHnmE= -github.com/fsouza/fake-gcs-server v1.49.0/go.mod h1:FJYZxdHQk2nGxrczFjLbDv8h6SnYXxSxcnM14eeespA= +github.com/fsouza/fake-gcs-server v1.49.2 h1:fukDqzEQM50QkA0jAbl6cLqeDu3maQjwZBuys759TR4= +github.com/fsouza/fake-gcs-server v1.49.2/go.mod h1:17SYzJEXRcaAA5ATwwvgBkSIqIy7r1icnGM0y/y4foY= github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955 h1:gmtGRvSexPU4B1T/yYo0sLOKzER1YT+b4kPxPpm0Ty4= github.com/gavv/monotime v0.0.0-20161010190848-47d58efa6955/go.mod h1:vmp8DIyckQMXOPl0AQVHt+7n5h7Gb7hS6CUydiV8QeA= github.com/getsentry/raven-go v0.2.0 h1:no+xWJRb5ZI7eE8TWgIq1jLulQiIoLG0IfYxv5JYMGs= @@ -230,8 +230,8 @@ github.com/google/martian 
v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPg github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs= github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0= -github.com/google/martian/v3 v3.3.2 h1:IqNFLAmvJOgVlpdEBiQbDc2EwKW77amAycfTuWKdfvw= -github.com/google/martian/v3 v3.3.2/go.mod h1:oBOf6HBosgwRXnUGWUB05QECsc6uvmMiJ3+6W4l/CUk= +github.com/google/martian/v3 v3.3.3 h1:DIhPTQrbPkgs2yJYdXU/eNACCG5DVQjySNRNlflZ9Fc= +github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0= github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc= github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM= @@ -256,8 +256,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfF github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk= -github.com/googleapis/gax-go/v2 v2.12.3 h1:5/zPPDvw8Q1SuXjrqrZslrqT7dL/uJT2CQii/cLCKqA= -github.com/googleapis/gax-go/v2 v2.12.3/go.mod h1:AKloxT6GtNbaLm8QTNSidHUVsHYcBHwWRvkNFJUQcS4= +github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/googleapis/gax-go/v2 v2.12.4/go.mod h1:KYEYLorsnIGDi/rPC8b5TdlB9kbKoFubselGIoBMCwI= github.com/googleapis/google-cloud-go-testing v0.0.0-20200911160855-bcd43fbb19e8/go.mod h1:dvDLG8qkwmyD9a/MJJN3XJcT3xFxOKAvTZGvuZmac9g= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1 h1:EGx4pi6eqNxGaHF6qqu48+N2wcFQ5qg5FXgOdqsJ5d8= github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY= @@ -351,8 +351,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zk github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4= github.com/minio/md5-simd v1.1.2 h1:Gdi1DZK69+ZVMoNHRXJyNcxrMA4dSxoYHZSQbirFg34= github.com/minio/md5-simd v1.1.2/go.mod h1:MzdKDxYpY2BT9XQFocsiZf/NKVtR7nkE4RoEpN+20RM= -github.com/minio/minio-go/v7 v7.0.70 h1:1u9NtMgfK1U42kUxcsl5v0yj6TEOPR497OAQxpJnn2g= -github.com/minio/minio-go/v7 v7.0.70/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo= +github.com/minio/minio-go/v7 v7.0.71 h1:No9XfOKTYi6i0GnBj+WZwD8WP5GZfL7n7GOjRqCdAjA= +github.com/minio/minio-go/v7 v7.0.71/go.mod h1:4yBA8v80xGA30cfM3fz0DKYMXunWl/AV/6tWEs9ryzo= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY= @@ -494,8 +494,8 @@ github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9de github.com/yuin/goldmark v1.1.32/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= 
-go.einride.tech/aip v0.66.0 h1:XfV+NQX6L7EOYK11yoHHFtndeaWh3KbD9/cN/6iWEt8= -go.einride.tech/aip v0.66.0/go.mod h1:qAhMsfT7plxBX+Oy7Huol6YUvZ0ZzdUz26yZsQwfl1M= +go.einride.tech/aip v0.67.1 h1:d/4TW92OxXBngkSOwWS2CH5rez869KpKMaN44mdxkFI= +go.einride.tech/aip v0.67.1/go.mod h1:ZGX4/zKw8dcgzdLsrvpOOGxfxI2QSk12SlP7d6c0/XI= go.opencensus.io v0.21.0/go.mod h1:mSImk1erAIZhrmZN+AvHh14ztQfjbGwt4TtuofqLduU= go.opencensus.io v0.22.0/go.mod h1:+kGneAE2xo2IficOXnaByMWTGM9T73dGwxeWcUqIpI8= go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= @@ -528,8 +528,8 @@ golang.org/x/crypto v0.0.0-20210421170649-83a5a9bb288b/go.mod h1:T9bdIzuCu7OtxOm golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20220214200702-86341886e292/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/crypto v0.0.0-20220722155217-630584e8d5aa/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= -golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30= -golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M= +golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI= +golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8= @@ -606,8 +606,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk= golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c= golang.org/x/net v0.1.0/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco= -golang.org/x/net v0.24.0 h1:1PcaxkF854Fu3+lvBIx5SYn9wRlBzzcnHZSiaFFAb0w= -golang.org/x/net v0.24.0/go.mod h1:2Q7sJY5mzlzWjKtYUEXSlBWCdyaioyXzRB2RtU8KVE8= +golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac= +golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw= @@ -617,8 +617,8 @@ golang.org/x/oauth2 v0.0.0-20200902213428-5d25da1a8d43/go.mod h1:KelEdhl1UZF7XfJ golang.org/x/oauth2 v0.0.0-20201109201403-9fd604954f58/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20201208152858-08078c50e5b5/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= golang.org/x/oauth2 v0.0.0-20210218202405-ba52d332ba99/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A= -golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo= -golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= +golang.org/x/oauth2 v0.21.0 h1:tsimM75w1tF/uws5rbeHzIWxEqElMehnc+iW793zsZs= +golang.org/x/oauth2 v0.21.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync 
v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -682,13 +682,13 @@ golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.19.0 h1:q5f1RH2jigJ1MoAWp2KTp3gm5zAGFUTarQZ5U386+4o= -golang.org/x/sys v0.19.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= +golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y= +golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8= -golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q= -golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk= +golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw= +golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY= golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= @@ -699,8 +699,8 @@ golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.3.8/go.mod h1:E6s5w1FMmriuDzIBO73fBruAKo1PCIq6d2Q6DHfQ8WQ= golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= -golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ= -golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= +golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk= +golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -783,8 +783,8 @@ google.golang.org/api v0.30.0/go.mod h1:QGmEvQ87FHZNiUVJkT14jQNYJ4ZJjdRF23ZXz513 google.golang.org/api v0.35.0/go.mod h1:/XrVsuzM0rZmrsbjJutiuftIzeuTQcEeaYcSk/mQ1dg= google.golang.org/api v0.36.0/go.mod h1:+z5ficQTmoYpPn8LCUNVpK5I7hwkpjbcgqA7I34qYtE= google.golang.org/api v0.40.0/go.mod h1:fYKFpnQN0DsDSKRVRcQSDQNtqWPfM9i+zNPxepjRCQ8= -google.golang.org/api v0.177.0 h1:8a0p/BbPa65GlqGWtUKxot4p0TV8OGOfyTjtmkXNXmk= -google.golang.org/api v0.177.0/go.mod h1:srbhue4MLjkjbkux5p3dw/ocYOSZTaIEvf7bCOnFQDw= +google.golang.org/api v0.183.0 h1:PNMeRDwo1pJdgNcFQ9GstuLe/noWKIc89pRWRLMvLwE= +google.golang.org/api v0.183.0/go.mod h1:q43adC5/pHoSZTx5h2mSmdF7NcyfW9JuDyIOJAgS9ZQ= google.golang.org/appengine v1.1.0/go.mod 
h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= @@ -830,12 +830,12 @@ google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6D google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda h1:wu/KJm9KJwpfHWhkkZGohVC6KRrc1oJNr4jwtQMOQXw= -google.golang.org/genproto v0.0.0-20240401170217-c3f982113cda/go.mod h1:g2LLCvCeCSir/JJSWosk19BR4NVxGqHUC6rxIRsd7Aw= -google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6 h1:DTJM0R8LECCgFeUwApvcEJHz85HLagW8uRENYxHh1ww= -google.golang.org/genproto/googleapis/api v0.0.0-20240429193739-8cf5692501f6/go.mod h1:10yRODfgim2/T8csjQsMPgZOMvtytXKTDRzH6HRGzRw= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6 h1:DujSIu+2tC9Ht0aPNA7jgj23Iq8Ewi5sgkQ++wdvonE= -google.golang.org/genproto/googleapis/rpc v0.0.0-20240429193739-8cf5692501f6/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY= +google.golang.org/genproto v0.0.0-20240528184218-531527333157 h1:u7WMYrIrVvs0TF5yaKwKNbcJyySYf+HAIFXxWltJOXE= +google.golang.org/genproto v0.0.0-20240528184218-531527333157/go.mod h1:ubQlAQnzejB8uZzszhrTCU2Fyp6Vi7ZE5nn0c3W8+qQ= +google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e h1:SkdGTrROJl2jRGT/Fxv5QUf9jtdKCQh4KQJXbXVLAi0= +google.golang.org/genproto/googleapis/api v0.0.0-20240521202816-d264139d666e/go.mod h1:LweJcLbyVij6rCex8YunD8DYR5VDonap/jYl3ZRxcIU= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8= +google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0= google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38= google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM= @@ -852,8 +852,8 @@ google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8= google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.63.2 h1:MUeiw1B2maTVZthpU5xvASfTh3LDbxHd6IJ6QQVU+xM= -google.golang.org/grpc v1.63.2/go.mod h1:WAX/8DgncnokcFUldAxq7GeB5DXHDbMF+lLvDomNkRA= +google.golang.org/grpc v1.64.0 h1:KH3VH9y/MgNQg1dE7b3XfVK0GsPSIzJwdF617gUSbvY= +google.golang.org/grpc v1.64.0/go.mod h1:oxjF8E3FBnjp+/gVFYdWacaLDx9na1aqy9oovLpxQYg= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= diff --git a/ingest/ledgerbackend/buffered_storage_backend.go 
b/ingest/ledgerbackend/buffered_storage_backend.go index 68e03ffcd3..bd2f541a4c 100644 --- a/ingest/ledgerbackend/buffered_storage_backend.go +++ b/ingest/ledgerbackend/buffered_storage_backend.go @@ -17,6 +17,8 @@ import ( // Ensure BufferedStorageBackend implements LedgerBackend var _ LedgerBackend = (*BufferedStorageBackend)(nil) +type BufferedStorageBackendFactory func(ctx context.Context, config BufferedStorageBackendConfig, dataStore datastore.DataStore) (*BufferedStorageBackend, error) + type BufferedStorageBackendConfig struct { BufferSize uint32 `toml:"buffer_size"` NumWorkers uint32 `toml:"num_workers"` diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go index 97144267ce..8a61038b92 100644 --- a/services/horizon/cmd/db.go +++ b/services/horizon/cmd/db.go @@ -270,11 +270,6 @@ var ( ledgerBackendType ingest.LedgerBackendType ) -type StorageBackendConfig struct { - DataStoreConfig datastore.DataStoreConfig `toml:"datastore_config"` - BufferedStorageBackendConfig ledgerbackend.BufferedStorageBackendConfig `toml:"buffered_storage_backend_config"` -} - func ingestRangeCmdOpts() support.ConfigOptions { return support.ConfigOptions{ { @@ -384,7 +379,7 @@ var dbReingestRangeCmd = &cobra.Command{ } } - var storageBackendConfig StorageBackendConfig + var storageBackendConfig ingest.StorageBackendConfig if ledgerBackendType == ingest.BufferedStorageBackend { cfg, err := toml.LoadFile(storageBackendConfigPath) if err != nil { @@ -393,6 +388,8 @@ var dbReingestRangeCmd = &cobra.Command{ if err = cfg.Unmarshal(&storageBackendConfig); err != nil { return fmt.Errorf("error unmarshalling TOML config: %w", err) } + storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend + storageBackendConfig.DataStoreFactory = datastore.NewDataStore } err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: false}) @@ -444,7 +441,7 @@ var dbFillGapsCmd = &cobra.Command{ withRange = true } - var storageBackendConfig StorageBackendConfig + var storageBackendConfig ingest.StorageBackendConfig if ledgerBackendType == ingest.BufferedStorageBackend { cfg, err := toml.LoadFile(storageBackendConfigPath) if err != nil { @@ -453,6 +450,8 @@ var dbFillGapsCmd = &cobra.Command{ if err = cfg.Unmarshal(&storageBackendConfig); err != nil { return fmt.Errorf("error unmarshalling TOML config: %w", err) } + storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend + storageBackendConfig.DataStoreFactory = datastore.NewDataStore } err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: false}) @@ -478,7 +477,7 @@ var dbFillGapsCmd = &cobra.Command{ }, } -func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, parallelWorkers uint, config horizon.Config, storageBackendConfig StorageBackendConfig) error { +func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, parallelWorkers uint, config horizon.Config, storageBackendConfig ingest.StorageBackendConfig) error { var err error if reingestForce && parallelWorkers > 1 { @@ -506,8 +505,7 @@ func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, MaxLedgerPerFlush: maxLedgersPerFlush, SkipTxmeta: config.SkipTxmeta, LedgerBackendType: ledgerBackendType, - DataStoreConfig: storageBackendConfig.DataStoreConfig, - BufferedBackendConfig: storageBackendConfig.BufferedStorageBackendConfig, + 
StorageBackendConfig: storageBackendConfig, } if ingestConfig.HistorySession, err = db.Open("postgres", config.DatabaseURL); err != nil { diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index a70a31fdf9..69fcd8e209 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -100,6 +100,13 @@ func (s LedgerBackendType) String() string { return "" } +type StorageBackendConfig struct { + DataStoreConfig datastore.DataStoreConfig `toml:"datastore_config"` + DataStoreFactory datastore.DataStoreFactory + BufferedStorageBackendConfig ledgerbackend.BufferedStorageBackendConfig `toml:"buffered_storage_backend_config"` + BufferedStorageBackendFactory ledgerbackend.BufferedStorageBackendFactory +} + type Config struct { StellarCoreURL string CaptiveCoreBinaryPath string @@ -134,9 +141,8 @@ type Config struct { ReapConfig ReapConfig - LedgerBackendType LedgerBackendType - DataStoreConfig datastore.DataStoreConfig - BufferedBackendConfig ledgerbackend.BufferedStorageBackendConfig + LedgerBackendType LedgerBackendType + StorageBackendConfig StorageBackendConfig } const ( @@ -289,12 +295,12 @@ func NewSystem(config Config) (System, error) { if config.LedgerBackendType == BufferedStorageBackend { // Ingest from datastore var dataStore datastore.DataStore - dataStore, err = datastore.NewDataStore(context.Background(), config.DataStoreConfig) + dataStore, err = config.StorageBackendConfig.DataStoreFactory(context.Background(), config.StorageBackendConfig.DataStoreConfig) if err != nil { cancel() return nil, fmt.Errorf("failed to create datastore: %w", err) } - ledgerBackend, err = ledgerbackend.NewBufferedStorageBackend(ctx, config.BufferedBackendConfig, dataStore) + ledgerBackend, err = config.StorageBackendConfig.BufferedStorageBackendFactory(ctx, config.StorageBackendConfig.BufferedStorageBackendConfig, dataStore) if err != nil { cancel() return nil, fmt.Errorf("failed to create buffered storage backend: %w", err) diff --git a/services/horizon/internal/ingest/main_test.go b/services/horizon/internal/ingest/main_test.go index 4f0e220ebe..c3c278f778 100644 --- a/services/horizon/internal/ingest/main_test.go +++ b/services/horizon/internal/ingest/main_test.go @@ -17,6 +17,7 @@ import ( "github.com/stellar/go/ingest" "github.com/stellar/go/ingest/ledgerbackend" "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/support/datastore" "github.com/stellar/go/support/db" "github.com/stellar/go/support/errors" logpkg "github.com/stellar/go/support/log" @@ -110,6 +111,32 @@ func TestNewSystem(t *testing.T) { assert.Equal(t, system.maxLedgerPerFlush, MaxLedgersPerFlush) } +func TestNewSystemBuffered(t *testing.T) { + mockDataStore := &datastore.MockDataStore{} + bufferedStorageBackend := &ledgerbackend.BufferedStorageBackend{} + config := Config{ + HistorySession: &db.Session{DB: &sqlx.DB{}}, + HistoryArchiveURLs: []string{"https://history.stellar.org/prd/core-live/core_live_001"}, + CheckpointFrequency: 64, + LedgerBackendType: BufferedStorageBackend, + StorageBackendConfig: StorageBackendConfig{ + DataStoreConfig: datastore.DataStoreConfig{Type: "GCS", Params: map[string]string{"destination_bucket_path": "Test"}}, + BufferedStorageBackendConfig: ledgerbackend.BufferedStorageBackendConfig{NumWorkers: 1, BufferSize: 2}, + DataStoreFactory: func(ctx context.Context, datastoreConfig datastore.DataStoreConfig) (datastore.DataStore, error) { + return mockDataStore, nil + }, + 
BufferedStorageBackendFactory: func(ctx context.Context, config ledgerbackend.BufferedStorageBackendConfig, dataStore datastore.DataStore) (*ledgerbackend.BufferedStorageBackend, error) { + return bufferedStorageBackend, nil + }, + }, + } + + sIface, err := NewSystem(config) + assert.NoError(t, err) + system := sIface.(*system) + assert.Same(t, system.ledgerBackend, bufferedStorageBackend) +} + // Custom comparator function.This function is needed because structs in Go that contain function fields // cannot be directly compared using assert.Equal, so here we compare each individual field, skipping the function fields. func CompareConfigs(t *testing.T, expected, actual Config) bool { diff --git a/support/datastore/datastore.go b/support/datastore/datastore.go index e4f41920e8..e91b3f009d 100644 --- a/support/datastore/datastore.go +++ b/support/datastore/datastore.go @@ -13,6 +13,8 @@ type DataStoreConfig struct { Schema DataStoreSchema `toml:"schema"` } +type DataStoreFactory func(ctx context.Context, datastoreConfig DataStoreConfig) (DataStore, error) + // DataStore defines an interface for interacting with data storage type DataStore interface { GetFileMetadata(ctx context.Context, path string) (map[string]string, error) From 7a267a6b3e153f35555664a0a645e41b7d7fac7d Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Tue, 9 Jul 2024 09:37:42 -0700 Subject: [PATCH 06/24] #4911: fixed lexie integration tests to use datastore factory function --- .../ledgerexporter/internal/integration_test.go | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/exp/services/ledgerexporter/internal/integration_test.go b/exp/services/ledgerexporter/internal/integration_test.go index dab2e5b5f8..ccc5463908 100644 --- a/exp/services/ledgerexporter/internal/integration_test.go +++ b/exp/services/ledgerexporter/internal/integration_test.go @@ -34,6 +34,7 @@ const ( // tests then refer to ledger sequences only up to this, therefore // don't have to do complex waiting within test for a sequence to exist. 
waitForCoreLedgerSequence = 16
+	configTemplate            = "test/integration_config_template.toml"
 )
 
 func TestLedgerExporterTestSuite(t *testing.T) {
@@ -54,6 +55,7 @@ type LedgerExporterTestSuite struct {
 	dockerCli      *client.Client
 	gcsServer      *fakestorage.Server
 	finishedSetup  bool
+	config         Config
 }
 
 func (s *LedgerExporterTestSuite) TestScanAndFill() {
@@ -74,7 +76,7 @@ func (s *LedgerExporterTestSuite) TestScanAndFill() {
 	s.T().Log(output)
 	s.T().Log(errOutput)
 
-	datastore, err := datastore.NewGCSDataStore(s.ctx, "integration-test/standalone")
+	datastore, err := datastore.NewDataStore(s.ctx, s.config.DataStoreConfig)
 	require.NoError(err)
 
 	_, err = datastore.GetFile(s.ctx, "FFFFFFFF--0-9/FFFFFFFA--5.xdr.zstd")
@@ -104,7 +106,7 @@ func (s *LedgerExporterTestSuite) TestAppend() {
 	s.T().Log(output)
 	s.T().Log(errOutput)
 
-	datastore, err := datastore.NewGCSDataStore(s.ctx, "integration-test/standalone")
+	datastore, err := datastore.NewDataStore(s.ctx, s.config.DataStoreConfig)
 	require.NoError(err)
 
 	_, err = datastore.GetFile(s.ctx, "FFFFFFFF--0-9/FFFFFFF6--9.xdr.zstd")
@@ -134,7 +136,7 @@ func (s *LedgerExporterTestSuite) TestAppendUnbounded() {
 		s.T().Log(errOutput)
 	}()
 
-	datastore, err := datastore.NewGCSDataStore(s.ctx, "integration-test/standalone")
+	datastore, err := datastore.NewDataStore(s.ctx, s.config.DataStoreConfig)
 	require.NoError(err)
 
 	require.EventuallyWithT(func(c *assert.CollectT) {
@@ -158,9 +160,9 @@ func (s *LedgerExporterTestSuite) SetupSuite() {
 	}()
 
 	testTempDir := t.TempDir()
-	ledgerExporterConfigTemplate, err := toml.LoadFile("test/integration_config_template.toml")
+	ledgerExporterConfigTemplate, err := toml.LoadFile(configTemplate)
 	if err != nil {
-		t.Fatalf("unable to load config template file %v", err)
+		t.Fatalf("unable to load config template file %v, %v", configTemplate, err)
 	}
 
 	// if LEDGEREXPORTER_INTEGRATION_TESTS_CAPTIVE_CORE_BIN not specified,
@@ -172,7 +174,10 @@ func (s *LedgerExporterTestSuite) SetupSuite() {
 
 	tomlBytes, err := toml.Marshal(ledgerExporterConfigTemplate)
 	if err != nil {
-		t.Fatalf("unable to load config file %v", err)
+		t.Fatalf("unable to marshal config template toml %v, %v", configTemplate, err)
+	}
+	if err = toml.Unmarshal(tomlBytes, &s.config); err != nil {
+		t.Fatalf("unable to unmarshal config file toml into struct, %v", err)
 	}
 
 	tempSeedDataPath := filepath.Join(testTempDir, "data")
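Note: the integration tests above now build their datastore from the datastore_config table of the loaded TOML template rather than a hard-coded NewGCSDataStore call. A minimal sketch of what that table might contain, assuming type, params, and schema keys that follow the toml tags on datastore.DataStoreConfig; the bucket path is the one the old NewGCSDataStore call used, and the schema values are inferred from the FFFFFFFF--0-9/... object names asserted in the tests (one ledger per file, ten files per partition), so all values are illustrative:

  [datastore_config]
  type = "GCS"

  [datastore_config.params]
  destination_bucket_path = "integration-test/standalone"

  # assumed schema keys; values inferred from the test object names
  [datastore_config.schema]
  ledgers_per_file = 1
  files_per_partition = 10
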
From a873570b734539a9d6154df768976eaf67fb2df2 Mon Sep 17 00:00:00 2001
From: urvisavla
Date: Tue, 9 Jul 2024 13:55:29 -0700
Subject: [PATCH 07/24] services/horizon: Skip captive-core configuration when reingesting from datastore (#5380)

* services/horizon: Reingest from precomputed TxMeta

* Global network config for reingestion

* Remove AlwaysIngest option since Ingest is now true by default
---
 services/horizon/cmd/db.go                    |  10 +-
 services/horizon/cmd/ingest.go                |  11 +-
 services/horizon/internal/flags.go            | 129 ++++++++----------
 services/horizon/internal/flags_test.go       |  99 ++++++++------
 .../internal/integration/parameters_test.go   |   9 +-
 5 files changed, 128 insertions(+), 130 deletions(-)

diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go
index 8a61038b92..2769273493 100644
--- a/services/horizon/cmd/db.go
+++ b/services/horizon/cmd/db.go
@@ -225,7 +225,7 @@ var dbReapCmd = &cobra.Command{
 	Long:  "reap removes any historical data that is earlier than the configured retention cutoff",
 	RunE: func(cmd *cobra.Command, args []string) error {
 
-		err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: false})
+		err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false})
 		if err != nil {
 			return err
 		}
@@ -380,6 +380,7 @@ var dbReingestRangeCmd = &cobra.Command{
 		}
 
 		var storageBackendConfig ingest.StorageBackendConfig
+		options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}
 		if ledgerBackendType == ingest.BufferedStorageBackend {
 			cfg, err := toml.LoadFile(storageBackendConfigPath)
 			if err != nil {
@@ -390,9 +391,10 @@ var dbReingestRangeCmd = &cobra.Command{
 			}
 			storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend
 			storageBackendConfig.DataStoreFactory = datastore.NewDataStore
+			options.NoCaptiveCore = true
 		}
 
-		err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: false})
+		err := horizon.ApplyFlags(globalConfig, globalFlags, options)
 		if err != nil {
 			return err
 		}
@@ -442,6 +444,7 @@ var dbFillGapsCmd = &cobra.Command{
 		}
 
 		var storageBackendConfig ingest.StorageBackendConfig
+		options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}
 		if ledgerBackendType == ingest.BufferedStorageBackend {
 			cfg, err := toml.LoadFile(storageBackendConfigPath)
 			if err != nil {
@@ -452,9 +455,10 @@ var dbFillGapsCmd = &cobra.Command{
 			}
 			storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend
 			storageBackendConfig.DataStoreFactory = datastore.NewDataStore
+			options.NoCaptiveCore = true
 		}
 
-		err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: false})
+		err := horizon.ApplyFlags(globalConfig, globalFlags, options)
 		if err != nil {
 			return err
 		}
diff --git a/services/horizon/cmd/ingest.go b/services/horizon/cmd/ingest.go
index 864067da8f..f6b94a8f52 100644
--- a/services/horizon/cmd/ingest.go
+++ b/services/horizon/cmd/ingest.go
@@ -9,6 +9,7 @@ import (
 	"github.com/spf13/cobra"
 	"github.com/spf13/viper"
 
+	"github.com/stellar/go/historyarchive"
 	horizon "github.com/stellar/go/services/horizon/internal"
 	"github.com/stellar/go/services/horizon/internal/db2/history"
@@ -94,7 +95,7 @@ var ingestVerifyRangeCmd = &cobra.Command{
 			co.SetValue()
 		}
 
-		if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: true}); err != nil {
+		if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}); err != nil {
 			return err
 		}
@@ -189,7 +190,7 @@ var ingestStressTestCmd = &cobra.Command{
 			co.SetValue()
 		}
 
-		if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: true}); err != nil {
+		if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}); err != nil {
 			return err
 		}
@@ -239,7 +240,7 @@ var ingestTriggerStateRebuildCmd = &cobra.Command{
 	Short: "updates a database to trigger state rebuild, state will be rebuilt by a running Horizon instance, DO NOT RUN production DB, some endpoints will be unavailable until state is rebuilt",
 	RunE: func(cmd *cobra.Command, args []string) error {
 		ctx := context.Background()
-		if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: true}); err != nil {
+		if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}); err != nil {
 			return err
 		}
@@ -263,7 +264,7 @@ 
var ingestInitGenesisStateCmd = &cobra.Command{ Short: "ingests genesis state (ledger 1)", RunE: func(cmd *cobra.Command, args []string) error { ctx := context.Background() - if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: true}); err != nil { + if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}); err != nil { return err } @@ -320,7 +321,7 @@ var ingestBuildStateCmd = &cobra.Command{ co.SetValue() } - if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false, AlwaysIngest: true}); err != nil { + if err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}); err != nil { return err } diff --git a/services/horizon/internal/flags.go b/services/horizon/internal/flags.go index 9930fee69f..38fca67576 100644 --- a/services/horizon/internal/flags.go +++ b/services/horizon/internal/flags.go @@ -832,7 +832,7 @@ func Flags() (*Config, support.ConfigOptions) { // NewAppFromFlags constructs a new Horizon App from the given command line flags func NewAppFromFlags(config *Config, flags support.ConfigOptions) (*App, error) { - err := ApplyFlags(config, flags, ApplyOptions{RequireCaptiveCoreFullConfig: true, AlwaysIngest: false}) + err := ApplyFlags(config, flags, ApplyOptions{RequireCaptiveCoreFullConfig: true}) if err != nil { return nil, err } @@ -850,30 +850,10 @@ func NewAppFromFlags(config *Config, flags support.ConfigOptions) (*App, error) } type ApplyOptions struct { - AlwaysIngest bool RequireCaptiveCoreFullConfig bool + NoCaptiveCore bool } -type networkConfig struct { - defaultConfig []byte - HistoryArchiveURLs []string - NetworkPassphrase string -} - -var ( - PubnetConf = networkConfig{ - defaultConfig: ledgerbackend.PubnetDefaultConfig, - HistoryArchiveURLs: network.PublicNetworkhistoryArchiveURLs, - NetworkPassphrase: network.PublicNetworkPassphrase, - } - - TestnetConf = networkConfig{ - defaultConfig: ledgerbackend.TestnetDefaultConfig, - HistoryArchiveURLs: network.TestNetworkhistoryArchiveURLs, - NetworkPassphrase: network.TestNetworkPassphrase, - } -) - // getCaptiveCoreBinaryPath retrieves the path of the Captive Core binary // Returns the path or an error if the binary is not found func getCaptiveCoreBinaryPath() (string, error) { @@ -884,69 +864,32 @@ func getCaptiveCoreBinaryPath() (string, error) { return result, nil } -// getCaptiveCoreConfigFromNetworkParameter returns the default Captive Core configuration based on the network. 
-func getCaptiveCoreConfigFromNetworkParameter(config *Config) (networkConfig, error) { - var defaultNetworkConfig networkConfig - - if config.NetworkPassphrase != "" { - return defaultNetworkConfig, fmt.Errorf("invalid config: %s parameter not allowed with the %s parameter", - NetworkPassphraseFlagName, NetworkFlagName) - } - - if len(config.HistoryArchiveURLs) > 0 { - return defaultNetworkConfig, fmt.Errorf("invalid config: %s parameter not allowed with the %s parameter", - HistoryArchiveURLsFlagName, NetworkFlagName) - } - - switch config.Network { - case StellarPubnet: - defaultNetworkConfig = PubnetConf - case StellarTestnet: - defaultNetworkConfig = TestnetConf - default: - return defaultNetworkConfig, fmt.Errorf("no default configuration found for network %s", config.Network) - } - - return defaultNetworkConfig, nil -} - // setCaptiveCoreConfiguration prepares configuration for the Captive Core func setCaptiveCoreConfiguration(config *Config, options ApplyOptions) error { stdLog.Println("Preparing captive core...") + var err error // If the user didn't specify a Stellar Core binary, we can check the // $PATH and possibly fill it in for them. if config.CaptiveCoreBinaryPath == "" { - var err error if config.CaptiveCoreBinaryPath, err = getCaptiveCoreBinaryPath(); err != nil { return fmt.Errorf("captive core requires %s", StellarCoreBinaryPathName) } } - var defaultNetworkConfig networkConfig - if config.Network != "" { - var err error - defaultNetworkConfig, err = getCaptiveCoreConfigFromNetworkParameter(config) - if err != nil { - return err - } - config.NetworkPassphrase = defaultNetworkConfig.NetworkPassphrase - config.HistoryArchiveURLs = defaultNetworkConfig.HistoryArchiveURLs - } else { - if config.NetworkPassphrase == "" { - return fmt.Errorf("%s must be set", NetworkPassphraseFlagName) - } + var defaultCaptiveCoreConfig []byte + switch config.Network { + case StellarPubnet: + defaultCaptiveCoreConfig = ledgerbackend.PubnetDefaultConfig + case StellarTestnet: - if len(config.HistoryArchiveURLs) == 0 { - return fmt.Errorf("%s must be set", HistoryArchiveURLsFlagName) - } + defaultCaptiveCoreConfig = ledgerbackend.TestnetDefaultConfig } config.CaptiveCoreTomlParams.CoreBinaryPath = config.CaptiveCoreBinaryPath config.CaptiveCoreTomlParams.HistoryArchiveURLs = config.HistoryArchiveURLs config.CaptiveCoreTomlParams.NetworkPassphrase = config.NetworkPassphrase - var err error if config.CaptiveCoreConfigPath != "" { config.CaptiveCoreToml, err = ledgerbackend.NewCaptiveCoreTomlFromFile(config.CaptiveCoreConfigPath, config.CaptiveCoreTomlParams) @@ -960,8 +903,8 @@ func setCaptiveCoreConfiguration(config *Config, options ApplyOptions) error { if err != nil { return errors.Wrap(err, "invalid captive core toml file") } - } else if len(defaultNetworkConfig.defaultConfig) != 0 { - config.CaptiveCoreToml, err = ledgerbackend.NewCaptiveCoreTomlFromData(defaultNetworkConfig.defaultConfig, + } else if len(defaultCaptiveCoreConfig) != 0 { + config.CaptiveCoreToml, err = ledgerbackend.NewCaptiveCoreTomlFromData(defaultCaptiveCoreConfig, config.CaptiveCoreTomlParams) if err != nil { return errors.Wrap(err, "invalid captive core toml file") @@ -1004,10 +947,6 @@ func ApplyFlags(config *Config, flags support.ConfigOptions, options ApplyOption return err } - if options.AlwaysIngest { - config.Ingest = true - } - if config.Ingest { // Migrations should be checked as early as possible. 
Apply and check // only on ingesting instances which are required to have write-access @@ -1023,9 +962,15 @@ func ApplyFlags(config *Config, flags support.ConfigOptions, options ApplyOption return err } - err := setCaptiveCoreConfiguration(config, options) - if err != nil { - return errors.Wrap(err, "error generating captive core configuration") + if err := setNetworkConfiguration(config); err != nil { + return err + } + + if !options.NoCaptiveCore { + err := setCaptiveCoreConfiguration(config, options) + if err != nil { + return errors.Wrap(err, "error generating captive core configuration") + } } } @@ -1061,3 +1006,37 @@ func ApplyFlags(config *Config, flags support.ConfigOptions, options ApplyOption return nil } + +func setNetworkConfiguration(config *Config) error { + if config.Network != "" { + if config.NetworkPassphrase != "" { + return fmt.Errorf("invalid config: %s parameter not allowed with the %s parameter", + NetworkPassphraseFlagName, NetworkFlagName) + } + + if len(config.HistoryArchiveURLs) > 0 { + return fmt.Errorf("invalid config: %s parameter not allowed with the %s parameter", + HistoryArchiveURLsFlagName, NetworkFlagName) + } + + switch config.Network { + case StellarPubnet: + config.NetworkPassphrase = network.PublicNetworkPassphrase + config.HistoryArchiveURLs = network.PublicNetworkhistoryArchiveURLs + case StellarTestnet: + config.NetworkPassphrase = network.TestNetworkPassphrase + config.HistoryArchiveURLs = network.TestNetworkhistoryArchiveURLs + default: + return fmt.Errorf("no default configuration found for network %s", config.Network) + } + } + + if config.NetworkPassphrase == "" { + return fmt.Errorf("%s must be set", NetworkPassphraseFlagName) + } + + if len(config.HistoryArchiveURLs) == 0 { + return fmt.Errorf("%s must be set", HistoryArchiveURLsFlagName) + } + return nil +} diff --git a/services/horizon/internal/flags_test.go b/services/horizon/internal/flags_test.go index 76ec1ffd8d..4d8d352080 100644 --- a/services/horizon/internal/flags_test.go +++ b/services/horizon/internal/flags_test.go @@ -8,6 +8,7 @@ import ( "github.com/spf13/cobra" + "github.com/stellar/go/network" "github.com/stellar/go/services/horizon/internal/test" "github.com/stretchr/testify/assert" @@ -29,16 +30,16 @@ func Test_createCaptiveCoreDefaultConfig(t *testing.T) { config: Config{Network: StellarTestnet, CaptiveCoreBinaryPath: "/path/to/captive-core/binary", }, - networkPassphrase: TestnetConf.NetworkPassphrase, - historyArchiveURLs: TestnetConf.HistoryArchiveURLs, + networkPassphrase: network.TestNetworkPassphrase, + historyArchiveURLs: network.TestNetworkhistoryArchiveURLs, }, { name: "pubnet default config", config: Config{Network: StellarPubnet, CaptiveCoreBinaryPath: "/path/to/captive-core/binary", }, - networkPassphrase: PubnetConf.NetworkPassphrase, - historyArchiveURLs: PubnetConf.HistoryArchiveURLs, + networkPassphrase: network.PublicNetworkPassphrase, + historyArchiveURLs: network.PublicNetworkhistoryArchiveURLs, }, { name: "testnet validation; history archive urls supplied", @@ -83,18 +84,41 @@ func Test_createCaptiveCoreDefaultConfig(t *testing.T) { }, errStr: "no default configuration found for network unknown", }, + { + name: "no network specified; passphrase not supplied", + config: Config{ + HistoryArchiveURLs: []string{"HistoryArchiveURLs"}, + CaptiveCoreBinaryPath: "/path/to/captive-core/binary", + }, + errStr: fmt.Sprintf("%s must be set", NetworkPassphraseFlagName), + }, + { + name: "no network specified; history archive urls not supplied", + config: Config{ + 
NetworkPassphrase: "NetworkPassphrase", + CaptiveCoreBinaryPath: "/path/to/captive-core/binary", + }, + errStr: fmt.Sprintf("%s must be set", HistoryArchiveURLsFlagName), + }, + + { + name: "unknown network specified", + config: Config{Network: "unknown", + NetworkPassphrase: "", + HistoryArchiveURLs: []string{}, + CaptiveCoreBinaryPath: "/path/to/captive-core/binary", + }, + errStr: "no default configuration found for network unknown", + }, } for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { tt.config.CaptiveCoreTomlParams.UseDB = true - e := setCaptiveCoreConfiguration(&tt.config, - ApplyOptions{RequireCaptiveCoreFullConfig: true}) + e := setNetworkConfiguration(&tt.config) if tt.errStr == "" { assert.NoError(t, e) assert.Equal(t, tt.networkPassphrase, tt.config.NetworkPassphrase) assert.Equal(t, tt.historyArchiveURLs, tt.config.HistoryArchiveURLs) - assert.Equal(t, tt.networkPassphrase, tt.config.CaptiveCoreTomlParams.NetworkPassphrase) - assert.Equal(t, tt.historyArchiveURLs, tt.config.CaptiveCoreTomlParams.HistoryArchiveURLs) } else { assert.Equal(t, tt.errStr, e.Error()) } @@ -102,53 +126,50 @@ func Test_createCaptiveCoreDefaultConfig(t *testing.T) { } } -func Test_createCaptiveCoreConfig(t *testing.T) { - - var errorMsgConfig = "%s must be set" +func TestSetCaptiveCoreConfig(t *testing.T) { tests := []struct { name string requireCaptiveCoreConfig bool config Config - networkPassphrase string - historyArchiveURLs []string errStr string }{ { - name: "no network specified; valid parameters", + name: "testnet default config", requireCaptiveCoreConfig: true, config: Config{ - NetworkPassphrase: PubnetConf.NetworkPassphrase, - HistoryArchiveURLs: PubnetConf.HistoryArchiveURLs, - CaptiveCoreConfigPath: "../../../ingest/ledgerbackend/configs/captive-core-pubnet.cfg", + Network: StellarTestnet, + NetworkPassphrase: network.TestNetworkPassphrase, + HistoryArchiveURLs: network.TestNetworkhistoryArchiveURLs, CaptiveCoreBinaryPath: "/path/to/captive-core/binary", }, - networkPassphrase: PubnetConf.NetworkPassphrase, - historyArchiveURLs: PubnetConf.HistoryArchiveURLs, }, { - name: "no network specified; passphrase not supplied", + name: "pubnet default config", requireCaptiveCoreConfig: true, config: Config{ - HistoryArchiveURLs: []string{"HistoryArchiveURLs"}, + Network: StellarPubnet, + NetworkPassphrase: network.PublicNetworkPassphrase, + HistoryArchiveURLs: network.PublicNetworkhistoryArchiveURLs, CaptiveCoreBinaryPath: "/path/to/captive-core/binary", }, - errStr: fmt.Sprintf(errorMsgConfig, NetworkPassphraseFlagName), }, { - name: "no network specified; history archive urls not supplied", + name: "no network specified; valid parameters", requireCaptiveCoreConfig: true, config: Config{ - NetworkPassphrase: "NetworkPassphrase", + NetworkPassphrase: network.PublicNetworkPassphrase, + HistoryArchiveURLs: network.PublicNetworkhistoryArchiveURLs, + CaptiveCoreConfigPath: "../../../ingest/ledgerbackend/configs/captive-core-pubnet.cfg", CaptiveCoreBinaryPath: "/path/to/captive-core/binary", }, - errStr: fmt.Sprintf(errorMsgConfig, HistoryArchiveURLsFlagName), }, + { name: "no network specified; captive-core-config-path not supplied", requireCaptiveCoreConfig: true, config: Config{ - NetworkPassphrase: PubnetConf.NetworkPassphrase, - HistoryArchiveURLs: PubnetConf.HistoryArchiveURLs, + NetworkPassphrase: network.PublicNetworkPassphrase, + HistoryArchiveURLs: network.PublicNetworkhistoryArchiveURLs, CaptiveCoreBinaryPath: "/path/to/captive-core/binary", }, errStr: fmt.Sprintf("invalid 
config: captive core requires that --%s is set or "+ @@ -158,8 +179,8 @@ func Test_createCaptiveCoreConfig(t *testing.T) { name: "no network specified; captive-core-config-path invalid file", requireCaptiveCoreConfig: true, config: Config{ - NetworkPassphrase: PubnetConf.NetworkPassphrase, - HistoryArchiveURLs: PubnetConf.HistoryArchiveURLs, + NetworkPassphrase: network.PublicNetworkPassphrase, + HistoryArchiveURLs: network.PublicNetworkhistoryArchiveURLs, CaptiveCoreConfigPath: "xyz.cfg", CaptiveCoreBinaryPath: "/path/to/captive-core/binary", }, @@ -170,25 +191,21 @@ func Test_createCaptiveCoreConfig(t *testing.T) { name: "no network specified; captive-core-config-path incorrect config", requireCaptiveCoreConfig: true, config: Config{ - NetworkPassphrase: PubnetConf.NetworkPassphrase, - HistoryArchiveURLs: PubnetConf.HistoryArchiveURLs, + NetworkPassphrase: network.PublicNetworkPassphrase, + HistoryArchiveURLs: network.PublicNetworkhistoryArchiveURLs, CaptiveCoreConfigPath: "../../../ingest/ledgerbackend/configs/captive-core-testnet.cfg", CaptiveCoreBinaryPath: "/path/to/captive-core/binary", }, errStr: fmt.Sprintf("invalid captive core toml file: invalid captive core toml: "+ "NETWORK_PASSPHRASE in captive core config file: %s does not match Horizon "+ - "network-passphrase flag: %s", TestnetConf.NetworkPassphrase, PubnetConf.NetworkPassphrase), + "network-passphrase flag: %s", network.TestNetworkPassphrase, network.PublicNetworkPassphrase), }, { - name: "no network specified; captive-core-config not required", + name: "no network specified; full captive-core-config not required", requireCaptiveCoreConfig: false, config: Config{ - NetworkPassphrase: PubnetConf.NetworkPassphrase, - HistoryArchiveURLs: PubnetConf.HistoryArchiveURLs, CaptiveCoreBinaryPath: "/path/to/captive-core/binary", }, - networkPassphrase: PubnetConf.NetworkPassphrase, - historyArchiveURLs: PubnetConf.HistoryArchiveURLs, }, } for _, tt := range tests { @@ -198,10 +215,6 @@ func Test_createCaptiveCoreConfig(t *testing.T) { ApplyOptions{RequireCaptiveCoreFullConfig: tt.requireCaptiveCoreConfig}) if tt.errStr == "" { assert.NoError(t, e) - assert.Equal(t, tt.networkPassphrase, tt.config.NetworkPassphrase) - assert.Equal(t, tt.historyArchiveURLs, tt.config.HistoryArchiveURLs) - assert.Equal(t, tt.networkPassphrase, tt.config.CaptiveCoreTomlParams.NetworkPassphrase) - assert.Equal(t, tt.historyArchiveURLs, tt.config.CaptiveCoreTomlParams.HistoryArchiveURLs) } else { require.Error(t, e) assert.Equal(t, tt.errStr, e.Error()) @@ -261,7 +274,7 @@ func TestClientQueryTimeoutFlag(t *testing.T) { if err := flags.Init(horizonCmd); err != nil { require.NoError(t, err) } - if err := ApplyFlags(config, flags, ApplyOptions{RequireCaptiveCoreFullConfig: true, AlwaysIngest: false}); err != nil { + if err := ApplyFlags(config, flags, ApplyOptions{RequireCaptiveCoreFullConfig: true}); err != nil { require.EqualError(t, err, testCase.err) } else { require.Empty(t, testCase.err) @@ -293,7 +306,7 @@ func TestEnvironmentVariables(t *testing.T) { if err := flags.Init(horizonCmd); err != nil { fmt.Println(err) } - if err := ApplyFlags(config, flags, ApplyOptions{RequireCaptiveCoreFullConfig: true, AlwaysIngest: false}); err != nil { + if err := ApplyFlags(config, flags, ApplyOptions{RequireCaptiveCoreFullConfig: true}); err != nil { fmt.Println(err) } assert.Equal(t, config.Ingest, false) diff --git a/services/horizon/internal/integration/parameters_test.go b/services/horizon/internal/integration/parameters_test.go index 133950d6f3..f50647abc7 
100644
--- a/services/horizon/internal/integration/parameters_test.go
+++ b/services/horizon/internal/integration/parameters_test.go
@@ -16,6 +16,7 @@ import (
 	"github.com/spf13/cobra"
+	"github.com/stellar/go/network"
 	"github.com/stellar/go/services/horizon/internal/paths"
 	"github.com/stellar/go/services/horizon/internal/simplepath"
@@ -186,13 +187,13 @@ func TestNetworkParameter(t *testing.T) {
 	}{
 		{
 			networkValue:       horizon.StellarTestnet,
-			networkPassphrase:  horizon.TestnetConf.NetworkPassphrase,
-			historyArchiveURLs: horizon.TestnetConf.HistoryArchiveURLs,
+			networkPassphrase:  network.TestNetworkPassphrase,
+			historyArchiveURLs: network.TestNetworkhistoryArchiveURLs,
 		},
 		{
 			networkValue:       horizon.StellarPubnet,
-			networkPassphrase:  horizon.PubnetConf.NetworkPassphrase,
-			historyArchiveURLs: horizon.PubnetConf.HistoryArchiveURLs,
+			networkPassphrase:  network.PublicNetworkPassphrase,
+			historyArchiveURLs: network.PublicNetworkhistoryArchiveURLs,
 		},
 	}
 	for _, tt := range testCases {

From 4491049e8a5d0a5e179014e9215a74f15e7d2e80 Mon Sep 17 00:00:00 2001
From: shawn
Date: Tue, 9 Jul 2024 19:44:10 -0700
Subject: [PATCH 08/24] #4911: added --parallel-job-size=100 default when datastore backend used (#5379)

---
 services/horizon/cmd/db.go                    | 11 ++-
 services/horizon/cmd/db_test.go               | 98 +++++++++++++++++++
 .../internal/integration/parameters_test.go   |  3 +-
 3 files changed, 108 insertions(+), 4 deletions(-)
 create mode 100644 services/horizon/cmd/db_test.go

diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go
index 2769273493..8f3e2ae9eb 100644
--- a/services/horizon/cmd/db.go
+++ b/services/horizon/cmd/db.go
@@ -27,6 +27,8 @@ import (
 	hlog "github.com/stellar/go/support/log"
 )
 
+var runDBReingestRangeFn = runDBReingestRange
+
 var dbCmd = &cobra.Command{
 	Use:   "db [command]",
 	Short: "commands to manage horizon's postgres db",
@@ -391,6 +393,11 @@ var dbReingestRangeCmd = &cobra.Command{
 		}
 		storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend
 		storageBackendConfig.DataStoreFactory = datastore.NewDataStore
+		// when using buffered storage, performance observations have shown an optimal
+		// parallel batch size of 100; apply it as the default if the flag was absent.
+ if !viper.IsSet("parallel-job-size") { + parallelJobSize = 100 + } options.NoCaptiveCore = true } @@ -398,7 +405,7 @@ var dbReingestRangeCmd = &cobra.Command{ if err != nil { return err } - return runDBReingestRange( + return runDBReingestRangeFn( []history.LedgerRange{{StartSequence: argsUInt32[0], EndSequence: argsUInt32[1]}}, reingestForce, parallelWorkers, @@ -477,7 +484,7 @@ var dbFillGapsCmd = &cobra.Command{ hlog.Infof("found gaps %v", gaps) } - return runDBReingestRange(gaps, reingestForce, parallelWorkers, *globalConfig, storageBackendConfig) + return runDBReingestRangeFn(gaps, reingestForce, parallelWorkers, *globalConfig, storageBackendConfig) }, } diff --git a/services/horizon/cmd/db_test.go b/services/horizon/cmd/db_test.go new file mode 100644 index 0000000000..3942f8d91a --- /dev/null +++ b/services/horizon/cmd/db_test.go @@ -0,0 +1,98 @@ +package cmd + +import ( + "testing" + + horizon "github.com/stellar/go/services/horizon/internal" + "github.com/stellar/go/services/horizon/internal/db2/history" + "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/support/db/dbtest" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" +) + +func TestDBCommandsTestSuite(t *testing.T) { + dbCmdSuite := &DBCommandsTestSuite{} + suite.Run(t, dbCmdSuite) +} + +type DBCommandsTestSuite struct { + suite.Suite + dsn string +} + +func (s *DBCommandsTestSuite) SetupSuite() { + runDBReingestRangeFn = func([]history.LedgerRange, bool, uint, + horizon.Config, ingest.StorageBackendConfig) error { + return nil + } + + newDB := dbtest.Postgres(s.T()) + s.dsn = newDB.DSN + + RootCmd.SetArgs([]string{ + "db", "migrate", "up", "--db-url", s.dsn}) + require.NoError(s.T(), RootCmd.Execute()) +} + +func (s *DBCommandsTestSuite) TestDefaultParallelJobSizeForBufferedBackend() { + RootCmd.SetArgs([]string{ + "db", "reingest", "range", + "--db-url", s.dsn, + "--network", "testnet", + "--parallel-workers", "2", + "--ledgerbackend", "datastore", + "--datastore-config", "../config.storagebackend.toml", + "2", + "10"}) + + require.NoError(s.T(), dbReingestRangeCmd.Execute()) + require.Equal(s.T(), parallelJobSize, uint32(100)) +} + +func (s *DBCommandsTestSuite) TestDefaultParallelJobSizeForCaptiveBackend() { + RootCmd.SetArgs([]string{ + "db", "reingest", "range", + "--db-url", s.dsn, + "--network", "testnet", + "--stellar-core-binary-path", "/test/core/bin/path", + "--parallel-workers", "2", + "--ledgerbackend", "captive-core", + "2", + "10"}) + + require.NoError(s.T(), RootCmd.Execute()) + require.Equal(s.T(), parallelJobSize, uint32(100_000)) +} + +func (s *DBCommandsTestSuite) TestUsesParallelJobSizeWhenSetForCaptive() { + RootCmd.SetArgs([]string{ + "db", "reingest", "range", + "--db-url", s.dsn, + "--network", "testnet", + "--stellar-core-binary-path", "/test/core/bin/path", + "--parallel-workers", "2", + "--parallel-job-size", "5", + "--ledgerbackend", "captive-core", + "2", + "10"}) + + require.NoError(s.T(), RootCmd.Execute()) + require.Equal(s.T(), parallelJobSize, uint32(5)) +} + +func (s *DBCommandsTestSuite) TestUsesParallelJobSizeWhenSetForBuffered() { + RootCmd.SetArgs([]string{ + "db", "reingest", "range", + "--db-url", s.dsn, + "--network", "testnet", + "--parallel-workers", "2", + "--parallel-job-size", "5", + "--ledgerbackend", "datastore", + "--datastore-config", "../config.storagebackend.toml", + "2", + "10"}) + + require.NoError(s.T(), RootCmd.Execute()) + require.Equal(s.T(), parallelJobSize, uint32(5)) +} diff --git 
a/services/horizon/internal/integration/parameters_test.go b/services/horizon/internal/integration/parameters_test.go index f50647abc7..333ed744c6 100644 --- a/services/horizon/internal/integration/parameters_test.go +++ b/services/horizon/internal/integration/parameters_test.go @@ -127,8 +127,7 @@ func TestEnvironmentPreserved(t *testing.T) { // using NETWORK environment variables, history archive urls or network passphrase // parameters are also set. func TestInvalidNetworkParameters(t *testing.T) { - var captiveCoreConfigErrMsg = integration.HorizonInitErrStr + ": error generating captive " + - "core configuration: invalid config: %s parameter not allowed with the %s parameter" + var captiveCoreConfigErrMsg = integration.HorizonInitErrStr + ": invalid config: %s parameter not allowed with the %s parameter" testCases := []struct { name string errMsg string From 7234d6e4d95bc020a7d654aaebe627a2eaa25663 Mon Sep 17 00:00:00 2001 From: urvisavla Date: Thu, 11 Jul 2024 00:27:59 -0700 Subject: [PATCH 09/24] services/horizon: Add unit test for new --ledgerbackend flag (#5382) * services/horizon: Reingest from precomputed TxMeta * Global network config for reingestion * Add unit test for new --ledgerbackend flag * Fix unit test * Add unittest for network flags validation --- services/horizon/cmd/db_test.go | 177 +++++++++++++++++++++++++++++++- 1 file changed, 175 insertions(+), 2 deletions(-) diff --git a/services/horizon/cmd/db_test.go b/services/horizon/cmd/db_test.go index 3942f8d91a..dea3e3777f 100644 --- a/services/horizon/cmd/db_test.go +++ b/services/horizon/cmd/db_test.go @@ -3,12 +3,13 @@ package cmd import ( "testing" + "github.com/stretchr/testify/require" + "github.com/stretchr/testify/suite" + horizon "github.com/stellar/go/services/horizon/internal" "github.com/stellar/go/services/horizon/internal/db2/history" "github.com/stellar/go/services/horizon/internal/ingest" "github.com/stellar/go/support/db/dbtest" - "github.com/stretchr/testify/require" - "github.com/stretchr/testify/suite" ) func TestDBCommandsTestSuite(t *testing.T) { @@ -21,6 +22,20 @@ type DBCommandsTestSuite struct { dsn string } +func (s *DBCommandsTestSuite) SetupTest() { + resetFlags() +} + +func resetFlags() { + RootCmd.ResetFlags() + dbFillGapsCmd.ResetFlags() + dbReingestRangeCmd.ResetFlags() + + globalFlags.Init(RootCmd) + dbFillGapsCmdOpts.Init(dbFillGapsCmd) + dbReingestRangeCmdOpts.Init(dbReingestRangeCmd) +} + func (s *DBCommandsTestSuite) SetupSuite() { runDBReingestRangeFn = func([]history.LedgerRange, bool, uint, horizon.Config, ingest.StorageBackendConfig) error { @@ -96,3 +111,161 @@ func (s *DBCommandsTestSuite) TestUsesParallelJobSizeWhenSetForBuffered() { require.NoError(s.T(), RootCmd.Execute()) require.Equal(s.T(), parallelJobSize, uint32(5)) } + +func (s *DBCommandsTestSuite) TestDbReingestAndFillGapsCmds() { + tests := []struct { + name string + args []string + ledgerBackend ingest.LedgerBackendType + expectError bool + errorMessage string + }{ + { + name: "default; w/ individual network flags", + args: []string{ + "1", "100", + "--network-passphrase", "passphrase", + "--history-archive-urls", "[]", + }, + expectError: false, + }, + { + name: "default; w/o individual network flags", + args: []string{ + "1", "100", + }, + expectError: true, + errorMessage: "network-passphrase must be set", + }, + { + name: "default; no history-archive-urls flag", + args: []string{ + "1", "100", + "--network-passphrase", "passphrase", + }, + expectError: true, + errorMessage: "history-archive-urls must be set", 
+ }, + { + name: "default; w/ network parameter", + args: []string{ + "1", "100", + "--network", "testnet", + }, + expectError: false, + }, + { + name: "datastore; w/ individual network flags", + args: []string{ + "1", "100", + "--ledgerbackend", "datastore", + "--datastore-config", "../config.storagebackend.toml", + "--network-passphrase", "passphrase", + "--history-archive-urls", "[]", + }, + expectError: false, + }, + { + name: "datastore; w/o individual network flags", + args: []string{ + "1", "100", + "--ledgerbackend", "datastore", + "--datastore-config", "../config.storagebackend.toml", + }, + expectError: true, + errorMessage: "network-passphrase must be set", + }, + { + name: "datastore; no history-archive-urls flag", + args: []string{ + "1", "100", + "--ledgerbackend", "datastore", + "--datastore-config", "../config.storagebackend.toml", + "--network-passphrase", "passphrase", + }, + expectError: true, + errorMessage: "history-archive-urls must be set", + }, + { + name: "captive-core; valid", + args: []string{ + "1", "100", + "--network", "testnet", + "--ledgerbackend", "captive-core", + }, + expectError: false, + }, + { + name: "invalid datastore", + args: []string{ + "1", "100", + "--network", "testnet", + "--ledgerbackend", "unknown", + }, + expectError: true, + errorMessage: "invalid ledger backend: unknown, must be 'captive-core' or 'datastore'", + }, + { + name: "datastore; missing config file", + args: []string{ + "1", "100", + "--network", "testnet", + "--ledgerbackend", "datastore", + "--datastore-config", "invalid.config.toml", + }, + expectError: true, + errorMessage: "failed to load config file", + }, + { + name: "datastore; w/ config", + args: []string{ + "1", "100", + "--network", "testnet", + "--ledgerbackend", "datastore", + "--datastore-config", "../config.storagebackend.toml", + }, + expectError: false, + }, + { + name: "datastore; w/o config", + args: []string{ + "1", "100", + "--network", "testnet", + "--ledgerbackend", "datastore", + }, + expectError: true, + errorMessage: "datastore config file is required for datastore backend type", + }, + } + + commands := []struct { + cmd []string + name string + }{ + {[]string{"db", "reingest", "range"}, "TestDbReingestRangeCmd"}, + {[]string{"db", "fill-gaps"}, "TestDbFillGapsCmd"}, + } + + for _, command := range commands { + for _, tt := range tests { + s.T().Run(tt.name+"_"+command.name, func(t *testing.T) { + resetFlags() + + var args []string + args = append(command.cmd, tt.args...) 
+ RootCmd.SetArgs(append([]string{ + "--db-url", s.dsn, + "--stellar-core-binary-path", "/test/core/bin/path", + }, args...)) + + if tt.expectError { + err := RootCmd.Execute() + require.Error(t, err) + require.Contains(t, err.Error(), tt.errorMessage) + } else { + require.NoError(t, RootCmd.Execute()) + } + }) + } + } +} From 7dc9ffbe12cfabdf442ca75deab37b700480bc5f Mon Sep 17 00:00:00 2001 From: shawn Date: Fri, 12 Jul 2024 15:28:53 -0700 Subject: [PATCH 10/24] #4911: added test for reingest with --ledgerbackend datastore (#5383) --- go.mod | 1 + go.sum | 4 + services/horizon/cmd/db.go | 744 +++++++++--------- services/horizon/cmd/db_test.go | 65 +- services/horizon/cmd/root.go | 62 +- .../horizon/internal/integration/db_test.go | 208 ++++- .../testbucket/FFFFFC18--999.xdr.zstd | Bin 0 -> 1547 bytes .../testbucket/FFFFFC19--998.xdr.zstd | Bin 0 -> 2791 bytes .../testbucket/FFFFFC1A--997.xdr.zstd | Bin 0 -> 2814 bytes 9 files changed, 639 insertions(+), 445 deletions(-) create mode 100644 services/horizon/internal/integration/testdata/testbucket/FFFFFC18--999.xdr.zstd create mode 100644 services/horizon/internal/integration/testdata/testbucket/FFFFFC19--998.xdr.zstd create mode 100644 services/horizon/internal/integration/testdata/testbucket/FFFFFC1A--997.xdr.zstd diff --git a/go.mod b/go.mod index 531a7ecd3f..ce6761d49e 100644 --- a/go.mod +++ b/go.mod @@ -63,6 +63,7 @@ require ( github.com/docker/docker v27.0.3+incompatible github.com/docker/go-connections v0.5.0 github.com/fsouza/fake-gcs-server v1.49.2 + github.com/otiai10/copy v1.14.0 ) require ( diff --git a/go.sum b/go.sum index 13d3a0acf0..4b3d6b186e 100644 --- a/go.sum +++ b/go.sum @@ -401,6 +401,10 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= +github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= +github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= +github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go index 8f3e2ae9eb..a07bc0ee10 100644 --- a/services/horizon/cmd/db.go +++ b/services/horizon/cmd/db.go @@ -20,6 +20,7 @@ import ( "github.com/stellar/go/services/horizon/internal/db2/history" "github.com/stellar/go/services/horizon/internal/db2/schema" "github.com/stellar/go/services/horizon/internal/ingest" + "github.com/stellar/go/support/config" support "github.com/stellar/go/support/config" "github.com/stellar/go/support/datastore" "github.com/stellar/go/support/db" @@ -27,24 +28,36 @@ import ( hlog "github.com/stellar/go/support/log" ) -var runDBReingestRangeFn = runDBReingestRange - -var dbCmd = &cobra.Command{ - Use: "db [command]", - Short: "commands to manage horizon's postgres db", -} - -var dbMigrateCmd = &cobra.Command{ - Use: "migrate [command]", - Short: "commands to run schema migrations on 
horizon's postgres db", -} +var ( + runDBReingestRangeFn = runDBReingestRange + dbCmd *cobra.Command + dbMigrateCmd *cobra.Command + dbInitCmd *cobra.Command + dbMigrateDownCmd *cobra.Command + dbMigrateRedoCmd *cobra.Command + dbMigrateStatusCmd *cobra.Command + dbMigrateUpCmd *cobra.Command + dbReapCmd *cobra.Command + dbReingestCmd *cobra.Command + dbReingestRangeCmd *cobra.Command + dbFillGapsCmd *cobra.Command + dbDetectGapsCmd *cobra.Command + reingestForce bool + parallelWorkers uint + parallelJobSize uint32 + retries uint + retryBackoffSeconds uint + ledgerBackendStr string + storageBackendConfigPath string + ledgerBackendType ingest.LedgerBackendType +) -func requireAndSetFlags(names ...string) error { +func requireAndSetFlags(horizonFlags config.ConfigOptions, names ...string) error { set := map[string]bool{} for _, name := range names { set[name] = true } - for _, flag := range globalFlags { + for _, flag := range horizonFlags { if set[flag.Name] { flag.Require() if err := flag.SetValue(); err != nil { @@ -63,44 +76,17 @@ func requireAndSetFlags(names ...string) error { return fmt.Errorf("could not find %s flags", strings.Join(missing, ",")) } -var dbInitCmd = &cobra.Command{ - Use: "init", - Short: "install schema", - Long: "init initializes the postgres database used by horizon.", - RunE: func(cmd *cobra.Command, args []string) error { - if err := requireAndSetFlags(horizon.DatabaseURLFlagName, horizon.IngestFlagName); err != nil { - return err - } - - db, err := sql.Open("postgres", globalConfig.DatabaseURL) - if err != nil { - return err - } - - numMigrationsRun, err := schema.Migrate(db, schema.MigrateUp, 0) - if err != nil { - return err - } - - if numMigrationsRun == 0 { - log.Println("No migrations applied.") - } else { - log.Printf("Successfully applied %d migrations.\n", numMigrationsRun) - } - return nil - }, -} - -func migrate(dir schema.MigrateDir, count int) error { - if !globalConfig.Ingest { +func migrate(dir schema.MigrateDir, count int, horizonConfig *horizon.Config) error { + if !horizonConfig.Ingest { log.Println("Skipping migrations because ingest flag is not enabled") return nil } - dbConn, err := db.Open("postgres", globalConfig.DatabaseURL) + dbConn, err := db.Open("postgres", horizonConfig.DatabaseURL) if err != nil { return err } + defer dbConn.Close() numMigrationsRun, err := schema.Migrate(dbConn.DB.DB, dir, count) if err != nil { @@ -115,163 +101,6 @@ func migrate(dir schema.MigrateDir, count int) error { return nil } -var dbMigrateDownCmd = &cobra.Command{ - Use: "down COUNT", - Short: "run downwards db schema migrations", - Long: "performs a downards schema migration command", - RunE: func(cmd *cobra.Command, args []string) error { - if err := requireAndSetFlags(horizon.DatabaseURLFlagName, horizon.IngestFlagName); err != nil { - return err - } - - // Only allow invocations with 1 args. - if len(args) != 1 { - return ErrUsage{cmd} - } - - count, err := strconv.Atoi(args[0]) - if err != nil { - log.Println(err) - return ErrUsage{cmd} - } - - return migrate(schema.MigrateDown, count) - }, -} - -var dbMigrateRedoCmd = &cobra.Command{ - Use: "redo COUNT", - Short: "redo db schema migrations", - Long: "performs a redo schema migration command", - RunE: func(cmd *cobra.Command, args []string) error { - if err := requireAndSetFlags(horizon.DatabaseURLFlagName, horizon.IngestFlagName); err != nil { - return err - } - - // Only allow invocations with 1 args. 
- if len(args) != 1 { - return ErrUsage{cmd} - } - - count, err := strconv.Atoi(args[0]) - if err != nil { - log.Println(err) - return ErrUsage{cmd} - } - - return migrate(schema.MigrateRedo, count) - }, -} - -var dbMigrateStatusCmd = &cobra.Command{ - Use: "status", - Short: "print current database migration status", - Long: "print current database migration status", - RunE: func(cmd *cobra.Command, args []string) error { - if err := requireAndSetFlags(horizon.DatabaseURLFlagName); err != nil { - return err - } - - // Only allow invocations with 0 args. - if len(args) != 0 { - fmt.Println(args) - return ErrUsage{cmd} - } - - dbConn, err := db.Open("postgres", globalConfig.DatabaseURL) - if err != nil { - return err - } - - status, err := schema.Status(dbConn.DB.DB) - if err != nil { - return err - } - - fmt.Println(status) - return nil - }, -} - -var dbMigrateUpCmd = &cobra.Command{ - Use: "up [COUNT]", - Short: "run upwards db schema migrations", - Long: "performs an upwards schema migration command", - RunE: func(cmd *cobra.Command, args []string) error { - if err := requireAndSetFlags(horizon.DatabaseURLFlagName, horizon.IngestFlagName); err != nil { - return err - } - - // Only allow invocations with 0-1 args. - if len(args) > 1 { - return ErrUsage{cmd} - } - - count := 0 - if len(args) == 1 { - var err error - count, err = strconv.Atoi(args[0]) - if err != nil { - log.Println(err) - return ErrUsage{cmd} - } - } - - return migrate(schema.MigrateUp, count) - }, -} - -var dbReapCmd = &cobra.Command{ - Use: "reap", - Short: "reaps (i.e. removes) any reapable history data", - Long: "reap removes any historical data that is earlier than the configured retention cutoff", - RunE: func(cmd *cobra.Command, args []string) error { - - err := horizon.ApplyFlags(globalConfig, globalFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}) - if err != nil { - return err - } - - session, err := db.Open("postgres", globalConfig.DatabaseURL) - if err != nil { - return fmt.Errorf("cannot open Horizon DB: %v", err) - } - defer session.Close() - - reaper := ingest.NewReaper( - ingest.ReapConfig{ - RetentionCount: uint32(globalConfig.HistoryRetentionCount), - BatchSize: uint32(globalConfig.HistoryRetentionReapCount), - }, - session, - ) - ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill) - defer cancel() - return reaper.DeleteUnretainedHistory(ctx) - }, -} - -var dbReingestCmd = &cobra.Command{ - Use: "reingest", - Short: "reingest commands", - Long: "reingest ingests historical data for every ledger or ledgers specified by subcommand", - RunE: func(cmd *cobra.Command, args []string) error { - fmt.Println("Use one of the subcomands...") - return ErrUsage{cmd} - }, -} - -var ( - reingestForce bool - parallelWorkers uint - parallelJobSize uint32 - retries uint - retryBackoffSeconds uint - ledgerBackendStr string - storageBackendConfigPath string - ledgerBackendType ingest.LedgerBackendType -) - func ingestRangeCmdOpts() support.ConfigOptions { return support.ConfigOptions{ { @@ -355,138 +184,7 @@ func ingestRangeCmdOpts() support.ConfigOptions { } var dbReingestRangeCmdOpts = ingestRangeCmdOpts() -var dbReingestRangeCmd = &cobra.Command{ - Use: "range [Start sequence number] [End sequence number]", - Short: "reingests ledgers within a range", - Long: "reingests ledgers between X and Y sequence number (closed intervals)", - RunE: func(cmd *cobra.Command, args []string) error { - if err := dbReingestRangeCmdOpts.RequireE(); err != nil { - return err - } - if err := 
dbReingestRangeCmdOpts.SetValues(); err != nil { - return err - } - - if len(args) != 2 { - return ErrUsage{cmd} - } - - argsUInt32 := make([]uint32, 2) - for i, arg := range args { - if seq, err := strconv.ParseUint(arg, 10, 32); err != nil { - cmd.Usage() - return fmt.Errorf(`invalid sequence number "%s"`, arg) - } else { - argsUInt32[i] = uint32(seq) - } - } - - var storageBackendConfig ingest.StorageBackendConfig - options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false} - if ledgerBackendType == ingest.BufferedStorageBackend { - cfg, err := toml.LoadFile(storageBackendConfigPath) - if err != nil { - return fmt.Errorf("failed to load config file %v: %w", storageBackendConfigPath, err) - } - if err = cfg.Unmarshal(&storageBackendConfig); err != nil { - return fmt.Errorf("error unmarshalling TOML config: %w", err) - } - storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend - storageBackendConfig.DataStoreFactory = datastore.NewDataStore - // when using buffered storage, performance observations have noted optimal parallel batch size - // of 100, apply that as default if the flag was absent. - if !viper.IsSet("parallel-job-size") { - parallelJobSize = 100 - } - options.NoCaptiveCore = true - } - - err := horizon.ApplyFlags(globalConfig, globalFlags, options) - if err != nil { - return err - } - return runDBReingestRangeFn( - []history.LedgerRange{{StartSequence: argsUInt32[0], EndSequence: argsUInt32[1]}}, - reingestForce, - parallelWorkers, - *globalConfig, - storageBackendConfig, - ) - }, -} - var dbFillGapsCmdOpts = ingestRangeCmdOpts() -var dbFillGapsCmd = &cobra.Command{ - Use: "fill-gaps [Start sequence number] [End sequence number]", - Short: "Ingests any gaps found in the horizon db", - Long: "Ingests any gaps found in the horizon db. 
The command takes an optional start and end parameters which restrict the range of ledgers ingested.", - RunE: func(cmd *cobra.Command, args []string) error { - if err := dbFillGapsCmdOpts.RequireE(); err != nil { - return err - } - if err := dbFillGapsCmdOpts.SetValues(); err != nil { - return err - } - - if len(args) != 0 && len(args) != 2 { - hlog.Errorf("Expected either 0 arguments or 2 but found %v arguments", len(args)) - return ErrUsage{cmd} - } - - var start, end uint64 - var withRange bool - if len(args) == 2 { - var err error - start, err = strconv.ParseUint(args[0], 10, 32) - if err != nil { - cmd.Usage() - return fmt.Errorf(`invalid sequence number "%s"`, args[0]) - } - end, err = strconv.ParseUint(args[1], 10, 32) - if err != nil { - cmd.Usage() - return fmt.Errorf(`invalid sequence number "%s"`, args[1]) - } - withRange = true - } - - var storageBackendConfig ingest.StorageBackendConfig - options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false} - if ledgerBackendType == ingest.BufferedStorageBackend { - cfg, err := toml.LoadFile(storageBackendConfigPath) - if err != nil { - return fmt.Errorf("failed to load config file %v: %w", storageBackendConfigPath, err) - } - if err = cfg.Unmarshal(&storageBackendConfig); err != nil { - return fmt.Errorf("error unmarshalling TOML config: %w", err) - } - storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend - storageBackendConfig.DataStoreFactory = datastore.NewDataStore - options.NoCaptiveCore = true - } - - err := horizon.ApplyFlags(globalConfig, globalFlags, options) - if err != nil { - return err - } - var gaps []history.LedgerRange - if withRange { - gaps, err = runDBDetectGapsInRange(*globalConfig, uint32(start), uint32(end)) - if err != nil { - return err - } - hlog.Infof("found gaps %v within range [%v, %v]", gaps, start, end) - } else { - gaps, err = runDBDetectGaps(*globalConfig) - if err != nil { - return err - } - hlog.Infof("found gaps %v", gaps) - } - - return runDBReingestRangeFn(gaps, reingestForce, parallelWorkers, *globalConfig, storageBackendConfig) - }, -} func runDBReingestRange(ledgerRanges []history.LedgerRange, reingestForce bool, parallelWorkers uint, config horizon.Config, storageBackendConfig ingest.StorageBackendConfig) error { var err error @@ -558,35 +256,6 @@ the reingest command completes.`) return nil } -var dbDetectGapsCmd = &cobra.Command{ - Use: "detect-gaps", - Short: "detects ingestion gaps in Horizon's database", - Long: "detects ingestion gaps in Horizon's database and prints a list of reingest commands needed to fill the gaps", - RunE: func(cmd *cobra.Command, args []string) error { - if err := requireAndSetFlags(horizon.DatabaseURLFlagName); err != nil { - return err - } - - if len(args) != 0 { - return ErrUsage{cmd} - } - gaps, err := runDBDetectGaps(*globalConfig) - if err != nil { - return err - } - if len(gaps) == 0 { - hlog.Info("No gaps found") - return nil - } - fmt.Println("Horizon commands to run in order to fill in the gaps:") - cmdname := os.Args[0] - for _, g := range gaps { - fmt.Printf("%s db reingest range %d %d\n", cmdname, g.StartSequence, g.EndSequence) - } - return nil - }, -} - func runDBDetectGaps(config horizon.Config) ([]history.LedgerRange, error) { horizonSession, err := db.Open("postgres", config.DatabaseURL) if err != nil { @@ -607,7 +276,352 @@ func runDBDetectGapsInRange(config horizon.Config, start, end uint32) ([]history return q.GetLedgerGapsInRange(context.Background(), start, end) } -func init() { +func 
DefineDBCommands(rootCmd *cobra.Command, horizonConfig *horizon.Config, horizonFlags config.ConfigOptions) {
+	dbCmd = &cobra.Command{
+		Use:   "db [command]",
+		Short: "commands to manage horizon's postgres db",
+	}
+
+	dbMigrateCmd = &cobra.Command{
+		Use:   "migrate [command]",
+		Short: "commands to run schema migrations on horizon's postgres db",
+	}
+
+	dbInitCmd = &cobra.Command{
+		Use:   "init",
+		Short: "install schema",
+		Long:  "init initializes the postgres database used by horizon.",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := requireAndSetFlags(horizonFlags, horizon.DatabaseURLFlagName, horizon.IngestFlagName); err != nil {
+				return err
+			}
+
+			db, err := sql.Open("postgres", horizonConfig.DatabaseURL)
+			if err != nil {
+				return err
+			}
+
+			numMigrationsRun, err := schema.Migrate(db, schema.MigrateUp, 0)
+			if err != nil {
+				return err
+			}
+
+			if numMigrationsRun == 0 {
+				log.Println("No migrations applied.")
+			} else {
+				log.Printf("Successfully applied %d migrations.\n", numMigrationsRun)
+			}
+			return nil
+		},
+	}
+
+	dbMigrateDownCmd = &cobra.Command{
+		Use:   "down COUNT",
+		Short: "run downwards db schema migrations",
+		Long:  "performs a downwards schema migration command",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := requireAndSetFlags(horizonFlags, horizon.DatabaseURLFlagName, horizon.IngestFlagName); err != nil {
+				return err
+			}
+
+			// Only allow invocations with 1 arg.
+			if len(args) != 1 {
+				return ErrUsage{cmd}
+			}
+
+			count, err := strconv.Atoi(args[0])
+			if err != nil {
+				log.Println(err)
+				return ErrUsage{cmd}
+			}
+
+			return migrate(schema.MigrateDown, count, horizonConfig)
+		},
+	}
+
+	dbMigrateRedoCmd = &cobra.Command{
+		Use:   "redo COUNT",
+		Short: "redo db schema migrations",
+		Long:  "performs a redo schema migration command",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := requireAndSetFlags(horizonFlags, horizon.DatabaseURLFlagName, horizon.IngestFlagName); err != nil {
+				return err
+			}
+
+			// Only allow invocations with 1 arg.
+			if len(args) != 1 {
+				return ErrUsage{cmd}
+			}
+
+			count, err := strconv.Atoi(args[0])
+			if err != nil {
+				log.Println(err)
+				return ErrUsage{cmd}
+			}
+
+			return migrate(schema.MigrateRedo, count, horizonConfig)
+		},
+	}
+
+	dbMigrateStatusCmd = &cobra.Command{
+		Use:   "status",
+		Short: "print current database migration status",
+		Long:  "print current database migration status",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := requireAndSetFlags(horizonFlags, horizon.DatabaseURLFlagName); err != nil {
+				return err
+			}
+
+			// Only allow invocations with 0 args.
+			if len(args) != 0 {
+				fmt.Println(args)
+				return ErrUsage{cmd}
+			}
+
+			dbConn, err := db.Open("postgres", horizonConfig.DatabaseURL)
+			if err != nil {
+				return err
+			}
+
+			status, err := schema.Status(dbConn.DB.DB)
+			if err != nil {
+				return err
+			}
+
+			fmt.Println(status)
+			return nil
+		},
+	}
+
+	dbMigrateUpCmd = &cobra.Command{
+		Use:   "up [COUNT]",
+		Short: "run upwards db schema migrations",
+		Long:  "performs an upwards schema migration command",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := requireAndSetFlags(horizonFlags, horizon.DatabaseURLFlagName, horizon.IngestFlagName); err != nil {
+				return err
+			}
+
+			// Only allow invocations with 0-1 args.
+			if len(args) > 1 {
+				return ErrUsage{cmd}
+			}
+
+			count := 0
+			if len(args) == 1 {
+				var err error
+				count, err = strconv.Atoi(args[0])
+				if err != nil {
+					log.Println(err)
+					return ErrUsage{cmd}
+				}
+			}
+
+			return migrate(schema.MigrateUp, count, horizonConfig)
+		},
+	}
+
+	dbReapCmd = &cobra.Command{
+		Use:   "reap",
+		Short: "reaps (i.e. removes) any reapable history data",
+		Long:  "reap removes any historical data that is earlier than the configured retention cutoff",
+		RunE: func(cmd *cobra.Command, args []string) error {
+
+			err := horizon.ApplyFlags(horizonConfig, horizonFlags, horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false})
+			if err != nil {
+				return err
+			}
+
+			session, err := db.Open("postgres", horizonConfig.DatabaseURL)
+			if err != nil {
+				return fmt.Errorf("cannot open Horizon DB: %v", err)
+			}
+			defer session.Close()
+
+			reaper := ingest.NewReaper(
+				ingest.ReapConfig{
+					RetentionCount: uint32(horizonConfig.HistoryRetentionCount),
+					BatchSize:      uint32(horizonConfig.HistoryRetentionReapCount),
+				},
+				session,
+			)
+			ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt, os.Kill)
+			defer cancel()
+			return reaper.DeleteUnretainedHistory(ctx)
+		},
+	}
+
+	dbReingestCmd = &cobra.Command{
+		Use:   "reingest",
+		Short: "reingest commands",
+		Long:  "reingest ingests historical data for every ledger or ledgers specified by subcommand",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			fmt.Println("Use one of the subcommands...")
+			return ErrUsage{cmd}
+		},
+	}
+
+	dbReingestRangeCmd = &cobra.Command{
+		Use:   "range [Start sequence number] [End sequence number]",
+		Short: "reingests ledgers within a range",
+		Long:  "reingests ledgers between X and Y sequence number (closed intervals)",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := dbReingestRangeCmdOpts.RequireE(); err != nil {
+				return err
+			}
+			if err := dbReingestRangeCmdOpts.SetValues(); err != nil {
+				return err
+			}
+
+			if len(args) != 2 {
+				return ErrUsage{cmd}
+			}
+
+			argsUInt32 := make([]uint32, 2)
+			for i, arg := range args {
+				if seq, err := strconv.ParseUint(arg, 10, 32); err != nil {
+					cmd.Usage()
+					return fmt.Errorf(`invalid sequence number "%s"`, arg)
+				} else {
+					argsUInt32[i] = uint32(seq)
+				}
+			}
+
+			var storageBackendConfig ingest.StorageBackendConfig
+			options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}
+			if ledgerBackendType == ingest.BufferedStorageBackend {
+				cfg, err := toml.LoadFile(storageBackendConfigPath)
+				if err != nil {
+					return fmt.Errorf("failed to load config file %v: %w", storageBackendConfigPath, err)
+				}
+				if err = cfg.Unmarshal(&storageBackendConfig); err != nil {
+					return fmt.Errorf("error unmarshalling TOML config: %w", err)
+				}
+				storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend
+				storageBackendConfig.DataStoreFactory = datastore.NewDataStore
+				// when using buffered storage, performance observations have noted optimal parallel batch size
+				// of 100, apply that as default if the flag was absent.
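+				// (editor's note, not part of the original patch: the cmd/db_test.go
+				// changes later in this series pin this behavior, expecting a
+				// parallel-job-size of 100 when the flag is absent and 5 when it is
+				// set explicitly for the datastore ledger backend.)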
+				if !viper.IsSet("parallel-job-size") {
+					parallelJobSize = 100
+				}
+				options.NoCaptiveCore = true
+			}
+
+			err := horizon.ApplyFlags(horizonConfig, horizonFlags, options)
+			if err != nil {
+				return err
+			}
+			return runDBReingestRangeFn(
+				[]history.LedgerRange{{StartSequence: argsUInt32[0], EndSequence: argsUInt32[1]}},
+				reingestForce,
+				parallelWorkers,
+				*horizonConfig,
+				storageBackendConfig,
+			)
+		},
+	}
+
+	dbFillGapsCmd = &cobra.Command{
+		Use:   "fill-gaps [Start sequence number] [End sequence number]",
+		Short: "Ingests any gaps found in the horizon db",
+		Long:  "Ingests any gaps found in the horizon db. The command takes optional start and end parameters which restrict the range of ledgers ingested.",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := dbFillGapsCmdOpts.RequireE(); err != nil {
+				return err
+			}
+			if err := dbFillGapsCmdOpts.SetValues(); err != nil {
+				return err
+			}
+
+			if len(args) != 0 && len(args) != 2 {
+				hlog.Errorf("Expected either 0 arguments or 2 but found %v arguments", len(args))
+				return ErrUsage{cmd}
+			}
+
+			var start, end uint64
+			var withRange bool
+			if len(args) == 2 {
+				var err error
+				start, err = strconv.ParseUint(args[0], 10, 32)
+				if err != nil {
+					cmd.Usage()
+					return fmt.Errorf(`invalid sequence number "%s"`, args[0])
+				}
+				end, err = strconv.ParseUint(args[1], 10, 32)
+				if err != nil {
+					cmd.Usage()
+					return fmt.Errorf(`invalid sequence number "%s"`, args[1])
+				}
+				withRange = true
+			}
+
+			var storageBackendConfig ingest.StorageBackendConfig
+			options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false}
+			if ledgerBackendType == ingest.BufferedStorageBackend {
+				cfg, err := toml.LoadFile(storageBackendConfigPath)
+				if err != nil {
+					return fmt.Errorf("failed to load config file %v: %w", storageBackendConfigPath, err)
+				}
+				if err = cfg.Unmarshal(&storageBackendConfig); err != nil {
+					return fmt.Errorf("error unmarshalling TOML config: %w", err)
+				}
+				storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend
+				storageBackendConfig.DataStoreFactory = datastore.NewDataStore
+				options.NoCaptiveCore = true
+			}
+
+			err := horizon.ApplyFlags(horizonConfig, horizonFlags, options)
+			if err != nil {
+				return err
+			}
+			var gaps []history.LedgerRange
+			if withRange {
+				gaps, err = runDBDetectGapsInRange(*horizonConfig, uint32(start), uint32(end))
+				if err != nil {
+					return err
+				}
+				hlog.Infof("found gaps %v within range [%v, %v]", gaps, start, end)
+			} else {
+				gaps, err = runDBDetectGaps(*horizonConfig)
+				if err != nil {
+					return err
+				}
+				hlog.Infof("found gaps %v", gaps)
+			}
+
+			return runDBReingestRangeFn(gaps, reingestForce, parallelWorkers, *horizonConfig, storageBackendConfig)
+		},
+	}
+
+	dbDetectGapsCmd = &cobra.Command{
+		Use:   "detect-gaps",
+		Short: "detects ingestion gaps in Horizon's database",
+		Long:  "detects ingestion gaps in Horizon's database and prints a list of reingest commands needed to fill the gaps",
+		RunE: func(cmd *cobra.Command, args []string) error {
+			if err := requireAndSetFlags(horizonFlags, horizon.DatabaseURLFlagName); err != nil {
+				return err
+			}
+
+			if len(args) != 0 {
+				return ErrUsage{cmd}
+			}
+			gaps, err := runDBDetectGaps(*horizonConfig)
+			if err != nil {
+				return err
+			}
+			if len(gaps) == 0 {
+				hlog.Info("No gaps found")
+				return nil
+			}
+			fmt.Println("Horizon commands to run in order to fill in the gaps:")
+			cmdname := os.Args[0]
+			for _, g := range gaps {
+				fmt.Printf("%s db reingest range %d %d\n", cmdname, g.StartSequence, g.EndSequence)
+			}
+
return nil + }, + } + if err := dbReingestRangeCmdOpts.Init(dbReingestRangeCmd); err != nil { log.Fatal(err.Error()) } @@ -618,7 +632,7 @@ func init() { viper.BindPFlags(dbReingestRangeCmd.PersistentFlags()) viper.BindPFlags(dbFillGapsCmd.PersistentFlags()) - RootCmd.AddCommand(dbCmd) + rootCmd.AddCommand(dbCmd) dbCmd.AddCommand( dbInitCmd, dbMigrateCmd, @@ -635,3 +649,7 @@ func init() { ) dbReingestCmd.AddCommand(dbReingestRangeCmd) } + +func init() { + DefineDBCommands(RootCmd, globalConfig, globalFlags) +} diff --git a/services/horizon/cmd/db_test.go b/services/horizon/cmd/db_test.go index dea3e3777f..d3fbcaf345 100644 --- a/services/horizon/cmd/db_test.go +++ b/services/horizon/cmd/db_test.go @@ -3,6 +3,7 @@ package cmd import ( "testing" + "github.com/spf13/cobra" "github.com/stretchr/testify/require" "github.com/stretchr/testify/suite" @@ -19,21 +20,8 @@ func TestDBCommandsTestSuite(t *testing.T) { type DBCommandsTestSuite struct { suite.Suite - dsn string -} - -func (s *DBCommandsTestSuite) SetupTest() { - resetFlags() -} - -func resetFlags() { - RootCmd.ResetFlags() - dbFillGapsCmd.ResetFlags() - dbReingestRangeCmd.ResetFlags() - - globalFlags.Init(RootCmd) - dbFillGapsCmdOpts.Init(dbFillGapsCmd) - dbReingestRangeCmdOpts.Init(dbReingestRangeCmd) + db *dbtest.DB + rootCmd *cobra.Command } func (s *DBCommandsTestSuite) SetupSuite() { @@ -42,18 +30,25 @@ func (s *DBCommandsTestSuite) SetupSuite() { return nil } - newDB := dbtest.Postgres(s.T()) - s.dsn = newDB.DSN + s.db = dbtest.Postgres(s.T()) RootCmd.SetArgs([]string{ - "db", "migrate", "up", "--db-url", s.dsn}) + "db", "migrate", "up", "--db-url", s.db.DSN}) require.NoError(s.T(), RootCmd.Execute()) } +func (s *DBCommandsTestSuite) TearDownSuite() { + s.db.Close() +} + +func (s *DBCommandsTestSuite) BeforeTest(suiteName string, testName string) { + s.rootCmd = NewRootCmd() +} + func (s *DBCommandsTestSuite) TestDefaultParallelJobSizeForBufferedBackend() { - RootCmd.SetArgs([]string{ + s.rootCmd.SetArgs([]string{ "db", "reingest", "range", - "--db-url", s.dsn, + "--db-url", s.db.DSN, "--network", "testnet", "--parallel-workers", "2", "--ledgerbackend", "datastore", @@ -61,14 +56,14 @@ func (s *DBCommandsTestSuite) TestDefaultParallelJobSizeForBufferedBackend() { "2", "10"}) - require.NoError(s.T(), dbReingestRangeCmd.Execute()) + require.NoError(s.T(), s.rootCmd.Execute()) require.Equal(s.T(), parallelJobSize, uint32(100)) } func (s *DBCommandsTestSuite) TestDefaultParallelJobSizeForCaptiveBackend() { - RootCmd.SetArgs([]string{ + s.rootCmd.SetArgs([]string{ "db", "reingest", "range", - "--db-url", s.dsn, + "--db-url", s.db.DSN, "--network", "testnet", "--stellar-core-binary-path", "/test/core/bin/path", "--parallel-workers", "2", @@ -76,14 +71,14 @@ func (s *DBCommandsTestSuite) TestDefaultParallelJobSizeForCaptiveBackend() { "2", "10"}) - require.NoError(s.T(), RootCmd.Execute()) + require.NoError(s.T(), s.rootCmd.Execute()) require.Equal(s.T(), parallelJobSize, uint32(100_000)) } func (s *DBCommandsTestSuite) TestUsesParallelJobSizeWhenSetForCaptive() { - RootCmd.SetArgs([]string{ + s.rootCmd.SetArgs([]string{ "db", "reingest", "range", - "--db-url", s.dsn, + "--db-url", s.db.DSN, "--network", "testnet", "--stellar-core-binary-path", "/test/core/bin/path", "--parallel-workers", "2", @@ -92,14 +87,14 @@ func (s *DBCommandsTestSuite) TestUsesParallelJobSizeWhenSetForCaptive() { "2", "10"}) - require.NoError(s.T(), RootCmd.Execute()) + require.NoError(s.T(), s.rootCmd.Execute()) require.Equal(s.T(), parallelJobSize, uint32(5)) } 
func (s *DBCommandsTestSuite) TestUsesParallelJobSizeWhenSetForBuffered() {
-	RootCmd.SetArgs([]string{
+	s.rootCmd.SetArgs([]string{
 		"db", "reingest", "range",
-		"--db-url", s.dsn,
+		"--db-url", s.db.DSN,
 		"--network", "testnet",
 		"--parallel-workers", "2",
 		"--parallel-job-size", "5",
@@ -108,7 +103,7 @@ func (s *DBCommandsTestSuite) TestUsesParallelJobSizeWhenSetForBuffered() {
 		"2",
 		"10"})
 
-	require.NoError(s.T(), RootCmd.Execute())
+	require.NoError(s.T(), s.rootCmd.Execute())
 	require.Equal(s.T(), parallelJobSize, uint32(5))
 }
 
@@ -249,21 +244,21 @@ func (s *DBCommandsTestSuite) TestDbReingestAndFillGapsCmds() {
 	for _, command := range commands {
 		for _, tt := range tests {
 			s.T().Run(tt.name+"_"+command.name, func(t *testing.T) {
-				resetFlags()
+				rootCmd := NewRootCmd()
 
 				var args []string
 				args = append(command.cmd, tt.args...)
-				RootCmd.SetArgs(append([]string{
-					"--db-url", s.dsn,
+				rootCmd.SetArgs(append([]string{
+					"--db-url", s.db.DSN,
 					"--stellar-core-binary-path", "/test/core/bin/path",
 				}, args...))
 
 				if tt.expectError {
-					err := RootCmd.Execute()
+					err := rootCmd.Execute()
 					require.Error(t, err)
 					require.Contains(t, err.Error(), tt.errorMessage)
 				} else {
-					require.NoError(t, RootCmd.Execute())
+					require.NoError(t, rootCmd.Execute())
 				}
 			})
 		}
diff --git a/services/horizon/cmd/root.go b/services/horizon/cmd/root.go
index d2900496d4..099979c97b 100644
--- a/services/horizon/cmd/root.go
+++ b/services/horizon/cmd/root.go
@@ -12,7 +12,13 @@ import (
 
 var (
 	globalConfig, globalFlags = horizon.Flags()
-	RootCmd                   = &cobra.Command{
+	RootCmd           = createRootCmd(globalConfig, globalFlags)
+	originalHelpFunc  = RootCmd.HelpFunc()
+	originalUsageFunc = RootCmd.UsageFunc()
+)
+
+func createRootCmd(horizonConfig *horizon.Config, configOptions config.ConfigOptions) *cobra.Command {
+	return &cobra.Command{
 		Use:           "horizon",
 		Short:         "client-facing api server for the Stellar network",
 		SilenceErrors: true,
@@ -23,16 +29,44 @@
 			"DEPRECATED - the use of command-line flags has been deprecated in favor of environment variables. Please" +
 			"consult our Configuring section in the developer documentation on how to use them - https://developers.stellar.org/docs/run-api-server/configuring",
 		RunE: func(cmd *cobra.Command, args []string) error {
-			app, err := horizon.NewAppFromFlags(globalConfig, globalFlags)
+			app, err := horizon.NewAppFromFlags(horizonConfig, configOptions)
 			if err != nil {
 				return err
 			}
 			return app.Serve()
 		},
 	}
-	originalHelpFunc  = RootCmd.HelpFunc()
-	originalUsageFunc = RootCmd.UsageFunc()
-)
+}
+
+func initRootCmd(cmd *cobra.Command,
+	originalHelpFn func(*cobra.Command, []string),
+	originalUsageFn func(*cobra.Command) error,
+	horizonGlobalFlags config.ConfigOptions) {
+	// override the default help output, apply further filtering on which global flags
+	// will be shown on the help output dependent on the command help was issued upon.
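+	// (editor's note, not part of the original patch: initRootCmd centralizes the
+	// wiring that previously lived in init(), which is what lets NewRootCmd build
+	// fully isolated command trees; the reworked DBCommandsTestSuite relies on
+	// this via s.rootCmd = NewRootCmd() in BeforeTest.)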
+ cmd.SetHelpFunc(func(c *cobra.Command, args []string) { + enableGlobalOptionsInHelp(c, horizonGlobalFlags) + originalHelpFn(c, args) + }) + + cmd.SetUsageFunc(func(c *cobra.Command) error { + enableGlobalOptionsInHelp(c, horizonGlobalFlags) + return originalUsageFn(c) + }) + + err := horizonGlobalFlags.Init(cmd) + if err != nil { + stdLog.Fatal(err.Error()) + } +} + +func NewRootCmd() *cobra.Command { + horizonGlobalConfig, horizonGlobalFlags := horizon.Flags() + cmd := createRootCmd(horizonGlobalConfig, horizonGlobalFlags) + initRootCmd(cmd, cmd.HelpFunc(), cmd.UsageFunc(), horizonGlobalFlags) + DefineDBCommands(cmd, horizonGlobalConfig, horizonGlobalFlags) + return cmd +} // ErrUsage indicates we should print the usage string and exit with code 1 type ErrUsage struct { @@ -51,23 +85,7 @@ func (e ErrExitCode) Error() string { } func init() { - - // override the default help output, apply further filtering on which global flags - // will be shown on the help outout dependent on the command help was issued upon. - RootCmd.SetHelpFunc(func(c *cobra.Command, args []string) { - enableGlobalOptionsInHelp(c, globalFlags) - originalHelpFunc(c, args) - }) - - RootCmd.SetUsageFunc(func(c *cobra.Command) error { - enableGlobalOptionsInHelp(c, globalFlags) - return originalUsageFunc(c) - }) - - err := globalFlags.Init(RootCmd) - if err != nil { - stdLog.Fatal(err.Error()) - } + initRootCmd(RootCmd, originalHelpFunc, originalUsageFunc, globalFlags) } func Execute() error { diff --git a/services/horizon/internal/integration/db_test.go b/services/horizon/internal/integration/db_test.go index 1f1d2277ec..86a86a8055 100644 --- a/services/horizon/internal/integration/db_test.go +++ b/services/horizon/internal/integration/db_test.go @@ -3,16 +3,24 @@ package integration import ( "context" "fmt" + "net" + "os" "path/filepath" "strconv" "testing" "time" + "github.com/fsouza/fake-gcs-server/fakestorage" + cp "github.com/otiai10/copy" + "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" "github.com/stellar/go/clients/horizonclient" + sdk "github.com/stellar/go/clients/horizonclient" "github.com/stellar/go/historyarchive" "github.com/stellar/go/keypair" + "github.com/stellar/go/network" hProtocol "github.com/stellar/go/protocols/horizon" horizoncmd "github.com/stellar/go/services/horizon/cmd" horizon "github.com/stellar/go/services/horizon/internal" @@ -25,6 +33,7 @@ import ( "github.com/stellar/go/support/db/dbtest" "github.com/stellar/go/txnbuild" "github.com/stellar/go/xdr" + "github.com/stellar/throttled" ) func submitLiquidityPoolOps(itest *integration.Test, tt *assert.Assertions) (submittedOperations []txnbuild.Operation, lastLedger int32) { @@ -485,7 +494,8 @@ func TestReingestDB(t *testing.T) { horizonConfig := itest.GetHorizonIngestConfig() t.Run("validate parallel range", func(t *testing.T) { - horizoncmd.RootCmd.SetArgs(command(t, horizonConfig, + var rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs(command(t, horizonConfig, "db", "reingest", "range", @@ -494,7 +504,7 @@ func TestReingestDB(t *testing.T) { "2", )) - assert.EqualError(t, horizoncmd.RootCmd.Execute(), "Invalid range: {10 2} from > to") + assert.EqualError(t, rootCmd.Execute(), "Invalid range: {10 2} from > to") }) t.Logf("reached ledger is %v", reachedLedger) @@ -537,7 +547,8 @@ func TestReingestDB(t *testing.T) { "captive-core-reingest-range-integration-tests.cfg", ) - horizoncmd.RootCmd.SetArgs(command(t, horizonConfig, "db", + var rootCmd = horizoncmd.NewRootCmd() + 
rootCmd.SetArgs(command(t, horizonConfig, "db", "reingest", "range", "--parallel-workers=1", @@ -545,8 +556,135 @@ func TestReingestDB(t *testing.T) { fmt.Sprintf("%d", toLedger), )) - tt.NoError(horizoncmd.RootCmd.Execute()) - tt.NoError(horizoncmd.RootCmd.Execute(), "Repeat the same reingest range against db, should not have errors.") + tt.NoError(rootCmd.Execute()) + tt.NoError(rootCmd.Execute(), "Repeat the same reingest range against db, should not have errors.") +} + +func TestReingestDatastore(t *testing.T) { + if os.Getenv("HORIZON_INTEGRATION_TESTS_ENABLED") == "" { + t.Skip("skipping integration test: HORIZON_INTEGRATION_TESTS_ENABLED not set") + } + + newDB := dbtest.Postgres(t) + defer newDB.Close() + var rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs([]string{ + "db", "migrate", "up", "--db-url", newDB.DSN}) + require.NoError(t, rootCmd.Execute()) + + testTempDir := t.TempDir() + tempSeedDataPath := filepath.Join(testTempDir, "data") + tempSeedBucketPath := filepath.Join(tempSeedDataPath, "path", "to", "my", "bucket") + tempSeedBucketFolder := filepath.Join(tempSeedBucketPath, "FFFFFFFF--0-63999") + if err := os.MkdirAll(tempSeedBucketFolder, 0777); err != nil { + t.Fatalf("unable to create seed data in temp path, %v", err) + } + + err := cp.Copy("./testdata/testbucket", tempSeedBucketFolder) + if err != nil { + t.Fatalf("unable to copy seed data files for fake gcs, %v", err) + } + + testWriter := &testWriter{test: t} + opts := fakestorage.Options{ + Scheme: "http", + Host: "127.0.0.1", + Port: uint16(0), + Writer: testWriter, + Seed: tempSeedDataPath, + StorageRoot: filepath.Join(testTempDir, "bucket"), + PublicHost: "127.0.0.1", + } + + gcsServer, err := fakestorage.NewServerWithOptions(opts) + + if err != nil { + t.Fatalf("couldn't start the fake gcs http server %v", err) + } + + defer gcsServer.Stop() + t.Logf("fake gcs server started at %v", gcsServer.URL()) + t.Setenv("STORAGE_EMULATOR_HOST", gcsServer.URL()) + + rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs([]string{"db", + "reingest", + "range", + "--db-url", newDB.DSN, + "--network", "testnet", + "--parallel-workers", "1", + "--ledgerbackend", "datastore", + "--datastore-config", "../../config.storagebackend.toml", + "997", + "999"}) + + require.NoError(t, rootCmd.Execute()) + + listener, webApp, webPort, err := dynamicHorizonWeb(newDB.DSN) + if err != nil { + t.Fatalf("couldn't create and start horizon web app on dynamic port %v", err) + } + + webAppDone := make(chan struct{}) + go func() { + defer close(webAppDone) + if err = listener.Close(); err != nil { + return + } + webApp.Serve() + }() + + defer func() { + webApp.Close() + select { + case <-webAppDone: + return + default: + } + }() + + horizonClient := &sdk.Client{ + HorizonURL: fmt.Sprintf("http://localhost:%v", webPort), + } + + // wait until the web server is up before continuing to test requests + require.Eventually(t, func() bool { + if _, horizonErr := horizonClient.Root(); horizonErr != nil { + return false + } + return true + }, time.Second*15, time.Millisecond*100) + + _, err = horizonClient.LedgerDetail(998) + require.NoError(t, err) +} + +func dynamicHorizonWeb(dsn string) (net.Listener, *horizon.App, uint, error) { + listener, err := net.Listen("tcp", "localhost:0") + if err != nil { + return nil, nil, 0, err + } + webPort := uint(listener.Addr().(*net.TCPAddr).Port) + + webApp, err := horizon.NewApp(horizon.Config{ + DatabaseURL: dsn, + Port: webPort, + NetworkPassphrase: network.TestNetworkPassphrase, + LogLevel: logrus.InfoLevel, + 
DisableTxSub: true, + Ingest: false, + ConnectionTimeout: 10 * time.Second, + RateQuota: &throttled.RateQuota{ + MaxRate: throttled.PerHour(1000), + MaxBurst: 100, + }, + }) + if err != nil { + listener.Close() + return nil, nil, 0, err + } + + return listener, webApp, webPort, nil } func TestReingestDBWithFilterRules(t *testing.T) { @@ -648,22 +786,24 @@ func TestReingestDBWithFilterRules(t *testing.T) { itest.StopHorizon() // clear the db with reaping all ledgers - horizoncmd.RootCmd.SetArgs(command(t, itest.GetHorizonIngestConfig(), "db", + var rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs(command(t, itest.GetHorizonIngestConfig(), "db", "reap", "--history-retention-count=1", )) - tt.NoError(horizoncmd.RootCmd.Execute()) + tt.NoError(rootCmd.Execute()) // repopulate the db with reingestion which should catchup using core reapply filter rules // correctly on reingestion ranged - horizoncmd.RootCmd.SetArgs(command(t, itest.GetHorizonIngestConfig(), "db", + rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs(command(t, itest.GetHorizonIngestConfig(), "db", "reingest", "range", "1", fmt.Sprintf("%d", reachedLedger), )) - tt.NoError(horizoncmd.RootCmd.Execute()) + tt.NoError(rootCmd.Execute()) // bring up horizon, just the api server no ingestion, to query // for tx's that should have been repopulated on db from reingestion per @@ -733,12 +873,13 @@ func TestMigrateIngestIsTrueByDefault(t *testing.T) { newDB := dbtest.Postgres(t) freshHorizonPostgresURL := newDB.DSN - horizoncmd.RootCmd.SetArgs([]string{ + rootCmd := horizoncmd.NewRootCmd() + rootCmd.SetArgs([]string{ // ingest is set to true by default "--db-url", freshHorizonPostgresURL, "db", "migrate", "up", }) - tt.NoError(horizoncmd.RootCmd.Execute()) + tt.NoError(rootCmd.Execute()) dbConn, err := db.Open("postgres", freshHorizonPostgresURL) tt.NoError(err) @@ -754,12 +895,13 @@ func TestMigrateChecksIngestFlag(t *testing.T) { newDB := dbtest.Postgres(t) freshHorizonPostgresURL := newDB.DSN - horizoncmd.RootCmd.SetArgs([]string{ + rootCmd := horizoncmd.NewRootCmd() + rootCmd.SetArgs([]string{ "--ingest=false", "--db-url", freshHorizonPostgresURL, "db", "migrate", "up", }) - tt.NoError(horizoncmd.RootCmd.Execute()) + tt.NoError(rootCmd.Execute()) dbConn, err := db.Open("postgres", freshHorizonPostgresURL) tt.NoError(err) @@ -802,7 +944,8 @@ func TestFillGaps(t *testing.T) { tt.NoError(err) t.Run("validate parallel range", func(t *testing.T) { - horizoncmd.RootCmd.SetArgs(command(t, horizonConfig, + var rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps", "--parallel-workers=2", @@ -810,7 +953,7 @@ func TestFillGaps(t *testing.T) { "2", )) - assert.EqualError(t, horizoncmd.RootCmd.Execute(), "Invalid range: {10 2} from > to") + assert.EqualError(t, rootCmd.Execute(), "Invalid range: {10 2} from > to") }) // make sure a full checkpoint has elapsed otherwise there will be nothing to reingest @@ -842,21 +985,25 @@ func TestFillGaps(t *testing.T) { filepath.Dir(horizonConfig.CaptiveCoreConfigPath), "captive-core-reingest-range-integration-tests.cfg", ) - horizoncmd.RootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps", "--parallel-workers=1")) - tt.NoError(horizoncmd.RootCmd.Execute()) + + rootCmd := horizoncmd.NewRootCmd() + rootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps", "--parallel-workers=1")) + tt.NoError(rootCmd.Execute()) tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) tt.Equal(int64(0), latestLedger) - horizoncmd.RootCmd.SetArgs(command(t, 
horizonConfig, "db", "fill-gaps", "3", "4")) - tt.NoError(horizoncmd.RootCmd.Execute()) + rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps", "3", "4")) + tt.NoError(rootCmd.Execute()) tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) tt.NoError(historyQ.ElderLedger(context.Background(), &oldestLedger)) tt.Equal(int64(3), oldestLedger) tt.Equal(int64(4), latestLedger) - horizoncmd.RootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps", "6", "7")) - tt.NoError(horizoncmd.RootCmd.Execute()) + rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps", "6", "7")) + tt.NoError(rootCmd.Execute()) tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) tt.NoError(historyQ.ElderLedger(context.Background(), &oldestLedger)) tt.Equal(int64(3), oldestLedger) @@ -866,8 +1013,9 @@ func TestFillGaps(t *testing.T) { tt.NoError(err) tt.Equal([]history.LedgerRange{{StartSequence: 5, EndSequence: 5}}, gaps) - horizoncmd.RootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps")) - tt.NoError(horizoncmd.RootCmd.Execute()) + rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps")) + tt.NoError(rootCmd.Execute()) tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) tt.NoError(historyQ.ElderLedger(context.Background(), &oldestLedger)) tt.Equal(int64(3), oldestLedger) @@ -876,8 +1024,9 @@ func TestFillGaps(t *testing.T) { tt.NoError(err) tt.Empty(gaps) - horizoncmd.RootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps", "2", "8")) - tt.NoError(horizoncmd.RootCmd.Execute()) + rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs(command(t, horizonConfig, "db", "fill-gaps", "2", "8")) + tt.NoError(rootCmd.Execute()) tt.NoError(historyQ.LatestLedger(context.Background(), &latestLedger)) tt.NoError(historyQ.ElderLedger(context.Background(), &oldestLedger)) tt.Equal(int64(2), oldestLedger) @@ -905,3 +1054,12 @@ func TestResumeFromInitializedDB(t *testing.T) { tt.Eventually(successfullyResumed, 1*time.Minute, 1*time.Second) } + +type testWriter struct { + test *testing.T +} + +func (w *testWriter) Write(p []byte) (n int, err error) { + w.test.Log(string(p)) + return len(p), nil +} diff --git a/services/horizon/internal/integration/testdata/testbucket/FFFFFC18--999.xdr.zstd b/services/horizon/internal/integration/testdata/testbucket/FFFFFC18--999.xdr.zstd new file mode 100644 index 0000000000000000000000000000000000000000..b2627e7fc1a95a8a8b50255573b00837a9139fe2 GIT binary patch literal 1547 zcmV+m2K4zTwJ-f-d>HL70F+1o00ZX$0RW*Kpt{u7lbA9>L6Ecqj14{q<4!*qZuzMh zP0G2{H#7hM02H>0jR*+|S#fT3INCscK`Rw*TZaIx?A)0;VAeka0*M=yO9G9GLnHah zP^Fg$Z+jGgxkxm!bJ+0wCgOL7yaE6K003pef#CoF00000)pBXsVo>)E?%)BC-?e*|Cc2Ut*|;5);s=4qnfnhdCe9!Po4Lk zqK=;@G4?uQ92zh!?$qQ>EM=9Ot%0I7lI}id!z0bFbBbkHEhZ|%4XTvg000B$4dAx3 zr(^&rm_q;p0001F08C3j0001K+>^52wkQEkIjEE&{m$XZ)I?UvZ z0096100IC211VD9PxAEIZ=qko>q8obZG~TL#Ji@V^J;a^e=|9(E&vZgKmY&$nE?=e z(Q7O?Gf09ifgbm&&lJ&^if;f^a#Yhk9&LtJ;GFH!OPLr*dqp@jAUQN`PR1){Gk*_o z*GGK$*%)$y*X{rS02fGh=mG!$K#<4UO%36E$PtF+i~Lo5dgNEzn2DcL>fYWwxQLg_ z{@?46%)TPmL~W8sKr&r}OMK@?Tk)Q+Zh&2W21dwn2|&G~StAvNt?Gc*&g*Kn4m_1* z9^Y)vkLy&~9K$3>^~6$tbtDr+1UCEiDWF4;P<|U~0}|eth9{&AdqGiFs}E+SJC=tU zS!h2gDSHvpIb;XuYu~v2iHUsP;P=;f=3LCuaVZNr-8PG4e1-(M2d5X_F$Jb_cs>CM zHcU#))F{XR06=?@pT1XownH^QSmiQm)6msx@ohNO(3R&m3xIna2RZ+LiIbW2)oeiV zMv!gx`K<^GZYf6{m4RBYY)DlZ*a0pTSX!~Bla9h=+MbGE)9?J1C7NRX-jyPPWHl+) zGI{jSfdd_wwVPu#dg+r72loo6_ZR)_TidAm8VW{gDFw>kGX5baq!4r)kF90Ho2Y)d 
From 01fbf9c59e41498fa6813e11802f15a5fe9afa41 Mon Sep 17 00:00:00 2001
From: shawn
Date: Mon, 15 Jul 2024 11:11:57 -0700
Subject: [PATCH 11/24] #4911: updated changelog/readme (#5389)

---
 .github/workflows/ledgerexporter.yml        |  5 ++-
 services/horizon/CHANGELOG.md               | 11 +++++
 services/horizon/cmd/db_test.go             | 12 +++---
 services/horizon/internal/ingest/README.md  | 42 ++++++++++++++++++-
 .../testdata}/config.storagebackend.toml    |  0
 .../horizon/internal/integration/db_test.go |  2 +-
 6 files changed, 61 insertions(+), 11 deletions(-)
 rename services/horizon/{ => internal/ingest/testdata}/config.storagebackend.toml (100%)

diff --git a/.github/workflows/ledgerexporter.yml b/.github/workflows/ledgerexporter.yml
index c80a367771..ac1e265582 100644
--- a/.github/workflows/ledgerexporter.yml
+++ b/.github/workflows/ledgerexporter.yml
@@ -13,9 +13,10 @@ jobs:
       CAPTIVE_CORE_DEBIAN_PKG_VERSION: 21.1.0-1921.b3aeb14cc.focal
       LEDGEREXPORTER_INTEGRATION_TESTS_ENABLED: "true"
      LEDGEREXPORTER_INTEGRATION_TESTS_CAPTIVE_CORE_BIN: /usr/bin/stellar-core
-      # this pins to a version of quickstart:testing that has the same version as LEDGEREXPORTER_INTEGRATION_TESTS_CAPTIVE_CORE_BIN
+      # this pins to a version of quickstart:testing that has the same version of core
+      # as specified on LEDGEREXPORTER_INTEGRATION_TESTS_CAPTIVE_CORE_BIN
       # this is the multi-arch index sha, get it by 'docker buildx imagetools inspect stellar/quickstart:testing'
-      LEDGEREXPORTER_INTEGRATION_TESTS_QUICKSTART_IMAGE: docker.io/stellar/quickstart:testing@sha256:03c6679f838a92b1eda4cd3a9e2bdee4c3586e278a138a0acf36a9bc99a0041f
+      LEDGEREXPORTER_INTEGRATION_TESTS_QUICKSTART_IMAGE: docker.io/stellar/quickstart:testing@sha256:5c8186f53cc98571749054dd782dce33b0aca2d1a622a7610362f7c15b79b1bf
       LEDGEREXPORTER_INTEGRATION_TESTS_QUICKSTART_IMAGE_PULL: "false"
     steps:
       - name: Install captive core
diff --git a/services/horizon/CHANGELOG.md
b/services/horizon/CHANGELOG.md index 501ad51847..cd5d8af57b 100644
--- a/services/horizon/CHANGELOG.md
+++ b/services/horizon/CHANGELOG.md
@@ -3,6 +3,17 @@ All notable changes to this project will be documented in this file.
 This project adheres to [Semantic Versioning](http://semver.org/).
 
+## Pending
+
+### Added
+
+- Reingest from pre-computed tx meta on remote cloud storage. ([4911](https://github.com/stellar/go/issues/4911), [5374](https://github.com/stellar/go/pull/5374))
+  - Configure horizon reingestion to obtain ledger tx meta in pre-computed files from a Google Cloud Storage (GCS) location.
+  - Using this option no longer requires a captive core binary to be present and no longer runs a captive core sub-process; the tx meta is instead obtained from the GCS backend.
+  - Horizon supports this new feature with two new parameters, `--ledgerbackend` and `--datastore-config`, on the `reingest` command. Refer to the [Reingestion README](./internal/ingest/README.md#reingestion).
+
+
 ## 2.31.0
 
 ### Breaking Changes
diff --git a/services/horizon/cmd/db_test.go b/services/horizon/cmd/db_test.go
index d3fbcaf345..93dd1ce119 100644
--- a/services/horizon/cmd/db_test.go
+++ b/services/horizon/cmd/db_test.go
@@ -52,7 +52,7 @@ func (s *DBCommandsTestSuite) TestDefaultParallelJobSizeForBufferedBackend() {
 		"--network", "testnet",
 		"--parallel-workers", "2",
 		"--ledgerbackend", "datastore",
-		"--datastore-config", "../config.storagebackend.toml",
+		"--datastore-config", "../internal/ingest/testdata/config.storagebackend.toml",
 		"2",
 		"10"})
 
@@ -99,7 +99,7 @@ func (s *DBCommandsTestSuite) TestUsesParallelJobSizeWhenSetForBuffered() {
 		"--parallel-workers", "2",
 		"--parallel-job-size", "5",
 		"--ledgerbackend", "datastore",
-		"--datastore-config", "../config.storagebackend.toml",
+		"--datastore-config", "../internal/ingest/testdata/config.storagebackend.toml",
 		"2",
 		"10"})
 
@@ -154,7 +154,7 @@ func (s *DBCommandsTestSuite) TestDbReingestAndFillGapsCmds() {
 			args: []string{
 				"1", "100",
 				"--ledgerbackend", "datastore",
-				"--datastore-config", "../config.storagebackend.toml",
+				"--datastore-config", "../internal/ingest/testdata/config.storagebackend.toml",
 				"--network-passphrase", "passphrase",
 				"--history-archive-urls", "[]",
 			},
@@ -165,7 +165,7 @@ func (s *DBCommandsTestSuite) TestDbReingestAndFillGapsCmds() {
 			args: []string{
 				"1", "100",
 				"--ledgerbackend", "datastore",
-				"--datastore-config", "../config.storagebackend.toml",
+				"--datastore-config", "../internal/ingest/testdata/config.storagebackend.toml",
 			},
 			expectError:  true,
 			errorMessage: "network-passphrase must be set",
@@ -175,7 +175,7 @@ func (s *DBCommandsTestSuite) TestDbReingestAndFillGapsCmds() {
 			args: []string{
 				"1", "100",
 				"--ledgerbackend", "datastore",
-				"--datastore-config", "../config.storagebackend.toml",
+				"--datastore-config", "../internal/ingest/testdata/config.storagebackend.toml",
 				"--network-passphrase", "passphrase",
 			},
 			expectError: true,
@@ -217,7 +217,7 @@ func (s *DBCommandsTestSuite) TestDbReingestAndFillGapsCmds() {
 				"1", "100",
 				"--network", "testnet",
 				"--ledgerbackend", "datastore",
-				"--datastore-config", "../config.storagebackend.toml",
+				"--datastore-config", "../internal/ingest/testdata/config.storagebackend.toml",
 			},
 			expectError: false,
 		},
diff --git a/services/horizon/internal/ingest/README.md b/services/horizon/internal/ingest/README.md
index a0874a0b43..12982b5047 100644
--- a/services/horizon/internal/ingest/README.md
+++ b/services/horizon/internal/ingest/README.md
@@ -140,8 +140,46 @@ This pauses the state machine for 10 seconds
then tries again, in hopes that a n
 
 **Next state**: [`start`](#start-state)
 
-# Ingestion
-TODO
+# Reingestion
+Horizon supports reingestion through the sub-command `db reingest range <start> <end>`, which runs as a synchronous OS process and exits only after the complete reingestion range has finished or an error is encountered.
+
+By default, this sub-command uses captive core configuration, i.e. a stellar core binary (`--stellar-core-binary-path`) and a stellar core config (`--captive-core-config-path`), to obtain the ledger tx meta to be ingested from a stellar network.
+
+The `db reingest range` sub-command can optionally be configured to consume pre-computed ledger tx meta files from a Google Cloud Storage (GCS) location instead of running captive core on the host machine.
+Prerequisites:
+ - Have a GCS account.
+ - Run the [ledgerexporter] to publish ledger tx meta files to your GCS bucket location.
+Run the `db reingest` sub-command, configured to import tx meta from your GCS bucket:
+  ```
+  $ DATABASE_URL=<horizon db url> \
+    NETWORK=testnet \
+    stellar-horizon db reingest range \
+    --parallel-workers 2 \
+    --ledgerbackend "datastore" \
+    --datastore-config "config.storagebackend.toml" \
+    100 200
+  ```
+Note that even though a stellar-core binary and config file are no longer needed, network related config is still required, either through the convenience parameter `NETWORK=testnet|pubnet` or directly via `NETWORK_PASSPHRASE` and `HISTORY_ARCHIVE_URLS`.
+
+The `--datastore-config` flag must point to a toml config file that provides the parameters needed for ingestion to work with remote GCS storage.
+
+Example config toml:
+```
+# Datastore Configuration
+[datastore_config]
+# Specifies the type of datastore.
+# Currently, only Google Cloud Storage (GCS) is supported.
+type = "GCS"
+
+[datastore_config.params]
+# The Google Cloud Storage bucket path for storing data, with optional subpaths for organization.
+destination_bucket_path = "path/to/my/bucket"
+
+[datastore_config.schema]
+# Configuration for data organization of the remote files
+ledgers_per_file = 1 # Number of ledgers stored in each file.
+files_per_partition = 64000 # Number of files per partition/directory.
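+
+# Editor's sketch, not part of the original example: these two schema values
+# determine the object layout seen in this series' integration test fixtures.
+# With ledgers_per_file = 1 and files_per_partition = 64000, ledgers 0-63999
+# land in a partition directory named "FFFFFFFF--0-63999", and ledger 999 is
+# stored as "FFFFFC18--999.xdr.zstd", where the hex prefix appears to be
+# 0xFFFFFFFF minus the starting ledger sequence (0xFFFFFFFF - 999 = 0xFFFFFC18).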
+ +``` # Range Preparation TODO: See `maybePrepareRange` diff --git a/services/horizon/config.storagebackend.toml b/services/horizon/internal/ingest/testdata/config.storagebackend.toml similarity index 100% rename from services/horizon/config.storagebackend.toml rename to services/horizon/internal/ingest/testdata/config.storagebackend.toml diff --git a/services/horizon/internal/integration/db_test.go b/services/horizon/internal/integration/db_test.go index 86a86a8055..b4716f10df 100644 --- a/services/horizon/internal/integration/db_test.go +++ b/services/horizon/internal/integration/db_test.go @@ -614,7 +614,7 @@ func TestReingestDatastore(t *testing.T) { "--network", "testnet", "--parallel-workers", "1", "--ledgerbackend", "datastore", - "--datastore-config", "../../config.storagebackend.toml", + "--datastore-config", "../ingest/testdata/config.storagebackend.toml", "997", "999"}) From 96284d056dfe93e2d4c626b383ccd82121479ca7 Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Tue, 16 Jul 2024 10:56:55 -0700 Subject: [PATCH 12/24] 4911: review feedback, remove nil check --- ingest/ledgerbackend/buffered_storage_backend.go | 4 ---- 1 file changed, 4 deletions(-) diff --git a/ingest/ledgerbackend/buffered_storage_backend.go b/ingest/ledgerbackend/buffered_storage_backend.go index bd2f541a4c..e4e81513b6 100644 --- a/ingest/ledgerbackend/buffered_storage_backend.go +++ b/ingest/ledgerbackend/buffered_storage_backend.go @@ -54,10 +54,6 @@ func NewBufferedStorageBackend(ctx context.Context, config BufferedStorageBacken return nil, errors.New("number of workers must be <= BufferSize") } - if dataStore == nil { - return nil, errors.New("no DataStore provided") - } - if dataStore.GetSchema(ctx).LedgersPerFile <= 0 { return nil, errors.New("ledgersPerFile must be > 0") } From 8d543ba21cc4181f11290547696bc7f3311a7876 Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Tue, 16 Jul 2024 11:43:47 -0700 Subject: [PATCH 13/24] #4911: use ledger backend enum names for command help, review feedback --- services/horizon/cmd/db.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go index a07bc0ee10..f4aba19735 100644 --- a/services/horizon/cmd/db.go +++ b/services/horizon/cmd/db.go @@ -150,7 +150,9 @@ func ingestRangeCmdOpts() support.ConfigOptions { OptType: types.String, Required: false, FlagDefault: ingest.CaptiveCoreBackend.String(), - Usage: "[optional] Specify the ledger backend type: 'captive-core' (default) or 'datastore'", + Usage: fmt.Sprintf("[optional] Specify the ledger backend type: '%s' (default) or '%s'", + ingest.CaptiveCoreBackend.String(), + ingest.BufferedStorageBackend.String()), CustomSetValue: func(co *support.ConfigOption) error { val := viper.GetString(co.Name) switch val { From d8110f98e3ee9e803e2a238d5ce9d9e3f94f35a5 Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Tue, 16 Jul 2024 13:07:51 -0700 Subject: [PATCH 14/24] #4911: use fakestoreage initial objects instead of seed path for fake bucket files, review feedback --- .../horizon/internal/integration/db_test.go | 49 ++++++++++++------- 1 file changed, 32 insertions(+), 17 deletions(-) diff --git a/services/horizon/internal/integration/db_test.go b/services/horizon/internal/integration/db_test.go index b4716f10df..bcc6f3ebb0 100644 --- a/services/horizon/internal/integration/db_test.go +++ b/services/horizon/internal/integration/db_test.go @@ -3,6 +3,7 @@ package integration import ( "context" "fmt" + "io/fs" "net" "os" "path/filepath" @@ -11,7 +12,6 @@ 
import ( "time" "github.com/fsouza/fake-gcs-server/fakestorage" - cp "github.com/otiai10/copy" "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" @@ -573,27 +573,42 @@ func TestReingestDatastore(t *testing.T) { require.NoError(t, rootCmd.Execute()) testTempDir := t.TempDir() - tempSeedDataPath := filepath.Join(testTempDir, "data") - tempSeedBucketPath := filepath.Join(tempSeedDataPath, "path", "to", "my", "bucket") - tempSeedBucketFolder := filepath.Join(tempSeedBucketPath, "FFFFFFFF--0-63999") - if err := os.MkdirAll(tempSeedBucketFolder, 0777); err != nil { - t.Fatalf("unable to create seed data in temp path, %v", err) - } + fakeBucketFilesSource := "testdata/testbucket" + fakeBucketFiles := []fakestorage.Object{} - err := cp.Copy("./testdata/testbucket", tempSeedBucketFolder) - if err != nil { - t.Fatalf("unable to copy seed data files for fake gcs, %v", err) + if err := filepath.WalkDir(fakeBucketFilesSource, func(path string, entry fs.DirEntry, err error) error { + if err != nil { + return err + } + + if entry.Type().IsRegular() { + contents, err := os.ReadFile(fmt.Sprintf("%s/%s", fakeBucketFilesSource, entry.Name())) + if err != nil { + return err + } + + fakeBucketFiles = append(fakeBucketFiles, fakestorage.Object{ + ObjectAttrs: fakestorage.ObjectAttrs{ + BucketName: "path", + Name: fmt.Sprintf("to/my/bucket/FFFFFFFF--0-63999/%s", entry.Name()), + }, + Content: contents, + }) + } + return nil + }); err != nil { + t.Fatalf("unable to setup fake bucket files: %v", err) } testWriter := &testWriter{test: t} opts := fakestorage.Options{ - Scheme: "http", - Host: "127.0.0.1", - Port: uint16(0), - Writer: testWriter, - Seed: tempSeedDataPath, - StorageRoot: filepath.Join(testTempDir, "bucket"), - PublicHost: "127.0.0.1", + Scheme: "http", + Host: "127.0.0.1", + Port: uint16(0), + Writer: testWriter, + StorageRoot: filepath.Join(testTempDir, "bucket"), + PublicHost: "127.0.0.1", + InitialObjects: fakeBucketFiles, } gcsServer, err := fakestorage.NewServerWithOptions(opts) From bf28af64846794bfbf2e4f0f46c851d042823dff Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Tue, 16 Jul 2024 13:15:13 -0700 Subject: [PATCH 15/24] #4911: fix go mod tidy --- go.mod | 1 - go.sum | 4 ---- 2 files changed, 5 deletions(-) diff --git a/go.mod b/go.mod index ce6761d49e..531a7ecd3f 100644 --- a/go.mod +++ b/go.mod @@ -63,7 +63,6 @@ require ( github.com/docker/docker v27.0.3+incompatible github.com/docker/go-connections v0.5.0 github.com/fsouza/fake-gcs-server v1.49.2 - github.com/otiai10/copy v1.14.0 ) require ( diff --git a/go.sum b/go.sum index 4b3d6b186e..13d3a0acf0 100644 --- a/go.sum +++ b/go.sum @@ -401,10 +401,6 @@ github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQ github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= -github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU= -github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w= -github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks= -github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8= github.com/pelletier/go-toml v1.9.5/go.mod 
h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c= github.com/pelletier/go-toml/v2 v2.1.0 h1:FnwAJ4oYMvbT/34k9zzHuZNrhlz48GB3/s6at6/MHO4= From 7f1fe6427dd6a1722c434daf730a96301888c287 Mon Sep 17 00:00:00 2001 From: Urvi Date: Tue, 16 Jul 2024 13:14:01 -0700 Subject: [PATCH 16/24] Addressing review comment on --datastore-config parameter validation --- services/horizon/cmd/db.go | 54 ++++++++++++++++----------------- services/horizon/cmd/db_test.go | 4 +-- 2 files changed, 28 insertions(+), 30 deletions(-) diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go index f4aba19735..09ced1eb9f 100644 --- a/services/horizon/cmd/db.go +++ b/services/horizon/cmd/db.go @@ -173,14 +173,6 @@ func ingestRangeCmdOpts() support.ConfigOptions { OptType: types.String, Required: false, Usage: "[optional] Specify the path to the datastore config file (required for datastore backend)", - CustomSetValue: func(co *support.ConfigOption) error { - val := viper.GetString(co.Name) - if ledgerBackendType == ingest.BufferedStorageBackend && val == "" { - return errors.New("datastore config file is required for datastore backend type") - } - *co.ConfigKey.(*string) = val - return nil - }, }, } } @@ -489,18 +481,13 @@ func DefineDBCommands(rootCmd *cobra.Command, horizonConfig *horizon.Config, hor } } + var err error var storageBackendConfig ingest.StorageBackendConfig options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false} if ledgerBackendType == ingest.BufferedStorageBackend { - cfg, err := toml.LoadFile(storageBackendConfigPath) - if err != nil { - return fmt.Errorf("failed to load config file %v: %w", storageBackendConfigPath, err) - } - if err = cfg.Unmarshal(&storageBackendConfig); err != nil { - return fmt.Errorf("error unmarshalling TOML config: %w", err) + if err, storageBackendConfig = loadStorageBackendConfig(storageBackendConfigPath); err != nil { + return err } - storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend - storageBackendConfig.DataStoreFactory = datastore.NewDataStore // when using buffered storage, performance observations have noted optimal parallel batch size // of 100, apply that as default if the flag was absent. 
if !viper.IsSet("parallel-job-size") { @@ -509,8 +496,7 @@ func DefineDBCommands(rootCmd *cobra.Command, horizonConfig *horizon.Config, hor options.NoCaptiveCore = true } - err := horizon.ApplyFlags(horizonConfig, horizonFlags, options) - if err != nil { + if err = horizon.ApplyFlags(horizonConfig, horizonFlags, options); err != nil { return err } return runDBReingestRangeFn( @@ -557,23 +543,17 @@ func DefineDBCommands(rootCmd *cobra.Command, horizonConfig *horizon.Config, hor withRange = true } + var err error var storageBackendConfig ingest.StorageBackendConfig options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false} if ledgerBackendType == ingest.BufferedStorageBackend { - cfg, err := toml.LoadFile(storageBackendConfigPath) - if err != nil { - return fmt.Errorf("failed to load config file %v: %w", storageBackendConfigPath, err) - } - if err = cfg.Unmarshal(&storageBackendConfig); err != nil { - return fmt.Errorf("error unmarshalling TOML config: %w", err) + if err, storageBackendConfig = loadStorageBackendConfig(storageBackendConfigPath); err != nil { + return err } - storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend - storageBackendConfig.DataStoreFactory = datastore.NewDataStore options.NoCaptiveCore = true } - err := horizon.ApplyFlags(horizonConfig, horizonFlags, options) - if err != nil { + if err = horizon.ApplyFlags(horizonConfig, horizonFlags, options); err != nil { return err } var gaps []history.LedgerRange @@ -652,6 +632,24 @@ func DefineDBCommands(rootCmd *cobra.Command, horizonConfig *horizon.Config, hor dbReingestCmd.AddCommand(dbReingestRangeCmd) } +func loadStorageBackendConfig(storageBackendConfigPath string) (error, ingest.StorageBackendConfig) { + if storageBackendConfigPath == "" { + return errors.New("datastore config file is required for datastore ledgerbackend type"), ingest.StorageBackendConfig{} + } + cfg, err := toml.LoadFile(storageBackendConfigPath) + if err != nil { + return fmt.Errorf("failed to load datastore ledgerbackend config file %v: %w", storageBackendConfigPath, err), ingest.StorageBackendConfig{} + } + var storageBackendConfig ingest.StorageBackendConfig + if err = cfg.Unmarshal(&storageBackendConfig); err != nil { + return fmt.Errorf("error unmarshalling datastore ledgerbackend TOML config: %w", err), ingest.StorageBackendConfig{} + } + + storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend + storageBackendConfig.DataStoreFactory = datastore.NewDataStore + return nil, storageBackendConfig +} + func init() { DefineDBCommands(RootCmd, globalConfig, globalFlags) } diff --git a/services/horizon/cmd/db_test.go b/services/horizon/cmd/db_test.go index 93dd1ce119..6a00576bd3 100644 --- a/services/horizon/cmd/db_test.go +++ b/services/horizon/cmd/db_test.go @@ -209,7 +209,7 @@ func (s *DBCommandsTestSuite) TestDbReingestAndFillGapsCmds() { "--datastore-config", "invalid.config.toml", }, expectError: true, - errorMessage: "failed to load config file", + errorMessage: "failed to load datastore ledgerbackend config file", }, { name: "datastore; w/ config", @@ -229,7 +229,7 @@ func (s *DBCommandsTestSuite) TestDbReingestAndFillGapsCmds() { "--ledgerbackend", "datastore", }, expectError: true, - errorMessage: "datastore config file is required for datastore backend type", + errorMessage: "datastore config file is required for datastore ledgerbackend type", }, } From 0f77ac630dca2479343363cd856e8b7683676206 Mon Sep 17 00:00:00 2001 From: Urvi Date: Tue, 16 Jul 2024 
15:11:59 -0700 Subject: [PATCH 17/24] Fix static check errors --- services/horizon/cmd/db.go | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go index 09ced1eb9f..c0d6c8c8ac 100644 --- a/services/horizon/cmd/db.go +++ b/services/horizon/cmd/db.go @@ -485,7 +485,7 @@ func DefineDBCommands(rootCmd *cobra.Command, horizonConfig *horizon.Config, hor var storageBackendConfig ingest.StorageBackendConfig options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false} if ledgerBackendType == ingest.BufferedStorageBackend { - if err, storageBackendConfig = loadStorageBackendConfig(storageBackendConfigPath); err != nil { + if storageBackendConfig, err = loadStorageBackendConfig(storageBackendConfigPath); err != nil { return err } // when using buffered storage, performance observations have noted optimal parallel batch size @@ -547,7 +547,7 @@ func DefineDBCommands(rootCmd *cobra.Command, horizonConfig *horizon.Config, hor var storageBackendConfig ingest.StorageBackendConfig options := horizon.ApplyOptions{RequireCaptiveCoreFullConfig: false} if ledgerBackendType == ingest.BufferedStorageBackend { - if err, storageBackendConfig = loadStorageBackendConfig(storageBackendConfigPath); err != nil { + if storageBackendConfig, err = loadStorageBackendConfig(storageBackendConfigPath); err != nil { return err } options.NoCaptiveCore = true @@ -632,22 +632,22 @@ func DefineDBCommands(rootCmd *cobra.Command, horizonConfig *horizon.Config, hor dbReingestCmd.AddCommand(dbReingestRangeCmd) } -func loadStorageBackendConfig(storageBackendConfigPath string) (error, ingest.StorageBackendConfig) { +func loadStorageBackendConfig(storageBackendConfigPath string) (ingest.StorageBackendConfig, error) { if storageBackendConfigPath == "" { - return errors.New("datastore config file is required for datastore ledgerbackend type"), ingest.StorageBackendConfig{} + return ingest.StorageBackendConfig{}, errors.New("datastore config file is required for datastore ledgerbackend type") } cfg, err := toml.LoadFile(storageBackendConfigPath) if err != nil { - return fmt.Errorf("failed to load datastore ledgerbackend config file %v: %w", storageBackendConfigPath, err), ingest.StorageBackendConfig{} + return ingest.StorageBackendConfig{}, fmt.Errorf("failed to load datastore ledgerbackend config file %v: %w", storageBackendConfigPath, err) } var storageBackendConfig ingest.StorageBackendConfig if err = cfg.Unmarshal(&storageBackendConfig); err != nil { - return fmt.Errorf("error unmarshalling datastore ledgerbackend TOML config: %w", err), ingest.StorageBackendConfig{} + return ingest.StorageBackendConfig{}, fmt.Errorf("error unmarshalling datastore ledgerbackend TOML config: %w", err) } storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend storageBackendConfig.DataStoreFactory = datastore.NewDataStore - return nil, storageBackendConfig + return storageBackendConfig, nil } func init() { From 8c1adae2482ea796b5132c06645a681e780173cd Mon Sep 17 00:00:00 2001 From: Urvi Date: Tue, 16 Jul 2024 15:28:10 -0700 Subject: [PATCH 18/24] Remove unused parameter from datastore::GetSchema --- .../ledgerbackend/buffered_storage_backend.go | 2 +- .../buffered_storage_backend_test.go | 18 +++++++++--------- ingest/ledgerbackend/ledger_buffer.go | 6 +++--- support/datastore/datastore.go | 2 +- support/datastore/gcs_datastore.go | 2 +- support/datastore/mocks.go | 4 ++-- 6 files changed, 17 insertions(+), 17 deletions(-) diff 
--git a/ingest/ledgerbackend/buffered_storage_backend.go b/ingest/ledgerbackend/buffered_storage_backend.go index e4e81513b6..65b0a0dd55 100644 --- a/ingest/ledgerbackend/buffered_storage_backend.go +++ b/ingest/ledgerbackend/buffered_storage_backend.go @@ -54,7 +54,7 @@ func NewBufferedStorageBackend(ctx context.Context, config BufferedStorageBacken return nil, errors.New("number of workers must be <= BufferSize") } - if dataStore.GetSchema(ctx).LedgersPerFile <= 0 { + if dataStore.GetSchema().LedgersPerFile <= 0 { return nil, errors.New("ledgersPerFile must be > 0") } diff --git a/ingest/ledgerbackend/buffered_storage_backend_test.go b/ingest/ledgerbackend/buffered_storage_backend_test.go index 510cffeabb..a64f715947 100644 --- a/ingest/ledgerbackend/buffered_storage_backend_test.go +++ b/ingest/ledgerbackend/buffered_storage_backend_test.go @@ -78,7 +78,7 @@ func createMockdataStore(t *testing.T, start, end, partitionSize, count uint32) } mockDataStore.On("GetFile", mock.Anything, objectName).Return(readCloser, nil) } - mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + mockDataStore.On("GetSchema").Return(datastore.DataStoreSchema{ LedgersPerFile: count, FilesPerPartition: partitionSize, }) @@ -125,7 +125,7 @@ func TestNewBufferedStorageBackend(t *testing.T) { ctx := context.Background() config := createBufferedStorageBackendConfigForTesting() mockDataStore := new(datastore.MockDataStore) - mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + mockDataStore.On("GetSchema").Return(datastore.DataStoreSchema{ LedgersPerFile: uint32(1), FilesPerPartition: partitionSize, }) @@ -133,8 +133,8 @@ func TestNewBufferedStorageBackend(t *testing.T) { assert.NoError(t, err) assert.Equal(t, bsb.dataStore, mockDataStore) - assert.Equal(t, uint32(1), bsb.dataStore.GetSchema(ctx).LedgersPerFile) - assert.Equal(t, uint32(64000), bsb.dataStore.GetSchema(ctx).FilesPerPartition) + assert.Equal(t, uint32(1), bsb.dataStore.GetSchema().LedgersPerFile) + assert.Equal(t, uint32(64000), bsb.dataStore.GetSchema().FilesPerPartition) assert.Equal(t, uint32(100), bsb.config.BufferSize) assert.Equal(t, uint32(5), bsb.config.NumWorkers) assert.Equal(t, uint32(3), bsb.config.RetryLimit) @@ -214,7 +214,7 @@ func TestCloudStorageGetLedger_MultipleLedgerPerFile(t *testing.T) { mockDataStore := createMockdataStore(t, startLedger, endLedger, partitionSize, 2) bsb.dataStore = mockDataStore - mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + mockDataStore.On("GetSchema").Return(datastore.DataStoreSchema{ LedgersPerFile: uint32(2), FilesPerPartition: partitionSize, }) @@ -453,7 +453,7 @@ func TestLedgerBufferClose(t *testing.T) { mockDataStore := new(datastore.MockDataStore) partition := ledgerPerFileCount*partitionSize - 1 - mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + mockDataStore.On("GetSchema").Return(datastore.DataStoreSchema{ LedgersPerFile: ledgerPerFileCount, FilesPerPartition: partitionSize, }) @@ -489,7 +489,7 @@ func TestLedgerBufferBoundedObjectNotFound(t *testing.T) { mockDataStore := new(datastore.MockDataStore) partition := ledgerPerFileCount*partitionSize - 1 - mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + mockDataStore.On("GetSchema").Return(datastore.DataStoreSchema{ LedgersPerFile: ledgerPerFileCount, FilesPerPartition: partitionSize, }) @@ -518,7 +518,7 @@ func TestLedgerBufferUnboundedObjectNotFound(t *testing.T) { mockDataStore := 
new(datastore.MockDataStore) partition := ledgerPerFileCount*partitionSize - 1 - mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + mockDataStore.On("GetSchema").Return(datastore.DataStoreSchema{ LedgersPerFile: ledgerPerFileCount, FilesPerPartition: partitionSize, }) @@ -563,7 +563,7 @@ func TestLedgerBufferRetryLimit(t *testing.T) { }) bsb.dataStore = mockDataStore - mockDataStore.On("GetSchema", mock.Anything).Return(datastore.DataStoreSchema{ + mockDataStore.On("GetSchema").Return(datastore.DataStoreSchema{ LedgersPerFile: ledgerPerFileCount, FilesPerPartition: partitionSize, }) diff --git a/ingest/ledgerbackend/ledger_buffer.go b/ingest/ledgerbackend/ledger_buffer.go index 3fea296b20..f06f832b87 100644 --- a/ingest/ledgerbackend/ledger_buffer.go +++ b/ingest/ledgerbackend/ledger_buffer.go @@ -95,7 +95,7 @@ func (lb *ledgerBuffer) pushTaskQueue(ctx context.Context) { return } lb.taskQueue <- lb.nextTaskLedger - lb.nextTaskLedger += lb.dataStore.GetSchema(ctx).LedgersPerFile + lb.nextTaskLedger += lb.dataStore.GetSchema().LedgersPerFile } // sleepWithContext returns true upon sleeping without interruption from the context @@ -163,7 +163,7 @@ func (lb *ledgerBuffer) worker(ctx context.Context) { } func (lb *ledgerBuffer) downloadLedgerObject(ctx context.Context, sequence uint32) ([]byte, error) { - objectKey := lb.dataStore.GetSchema(ctx).GetObjectKeyFromSequenceNumber(sequence) + objectKey := lb.dataStore.GetSchema().GetObjectKeyFromSequenceNumber(sequence) reader, err := lb.dataStore.GetFile(ctx, objectKey) if err != nil { @@ -198,7 +198,7 @@ func (lb *ledgerBuffer) storeObject(ctx context.Context, ledgerObject []byte, se for lb.ledgerPriorityQueue.Len() > 0 && lb.currentLedger == uint32(lb.ledgerPriorityQueue.Peek().startLedger) { item := lb.ledgerPriorityQueue.Pop() lb.ledgerQueue <- item.payload - lb.currentLedger += lb.dataStore.GetSchema(ctx).LedgersPerFile + lb.currentLedger += lb.dataStore.GetSchema().LedgersPerFile } } diff --git a/support/datastore/datastore.go b/support/datastore/datastore.go index e91b3f009d..72cb52d65b 100644 --- a/support/datastore/datastore.go +++ b/support/datastore/datastore.go @@ -23,7 +23,7 @@ type DataStore interface { PutFileIfNotExists(ctx context.Context, path string, in io.WriterTo, metaData map[string]string) (bool, error) Exists(ctx context.Context, path string) (bool, error) Size(ctx context.Context, path string) (int64, error) - GetSchema(ctx context.Context) DataStoreSchema + GetSchema() DataStoreSchema Close() error } diff --git a/support/datastore/gcs_datastore.go b/support/datastore/gcs_datastore.go index 3cf48b3fcb..ab1bc669b5 100644 --- a/support/datastore/gcs_datastore.go +++ b/support/datastore/gcs_datastore.go @@ -182,6 +182,6 @@ func (b GCSDataStore) putFile(ctx context.Context, filePath string, in io.Writer // GetSchema returns the schema information which defines the structure // and organization of data in the datastore. 
-func (b GCSDataStore) GetSchema(ctx context.Context) DataStoreSchema { +func (b GCSDataStore) GetSchema() DataStoreSchema { return b.schema } diff --git a/support/datastore/mocks.go b/support/datastore/mocks.go index 6b505e6478..2fa39a4712 100644 --- a/support/datastore/mocks.go +++ b/support/datastore/mocks.go @@ -47,8 +47,8 @@ func (m *MockDataStore) Close() error { return args.Error(0) } -func (m *MockDataStore) GetSchema(ctx context.Context) DataStoreSchema { - args := m.Called(ctx) +func (m *MockDataStore) GetSchema() DataStoreSchema { + args := m.Called() return args.Get(0).(DataStoreSchema) } From e8261f8b07836bea2b95b04ccecffeb4a60f7ff2 Mon Sep 17 00:00:00 2001 From: Urvi Date: Tue, 16 Jul 2024 16:13:32 -0700 Subject: [PATCH 19/24] Remove unused context parameter --- ingest/ledgerbackend/buffered_storage_backend.go | 4 ++-- ingest/ledgerbackend/buffered_storage_backend_test.go | 3 +-- ingest/ledgerbackend/ledger_buffer.go | 10 +++++----- services/horizon/internal/ingest/main.go | 2 +- services/horizon/internal/ingest/main_test.go | 2 +- 5 files changed, 10 insertions(+), 11 deletions(-) diff --git a/ingest/ledgerbackend/buffered_storage_backend.go b/ingest/ledgerbackend/buffered_storage_backend.go index 65b0a0dd55..e7fcdc86f4 100644 --- a/ingest/ledgerbackend/buffered_storage_backend.go +++ b/ingest/ledgerbackend/buffered_storage_backend.go @@ -17,7 +17,7 @@ import ( // Ensure BufferedStorageBackend implements LedgerBackend var _ LedgerBackend = (*BufferedStorageBackend)(nil) -type BufferedStorageBackendFactory func(ctx context.Context, config BufferedStorageBackendConfig, dataStore datastore.DataStore) (*BufferedStorageBackend, error) +type BufferedStorageBackendFactory func(config BufferedStorageBackendConfig, dataStore datastore.DataStore) (*BufferedStorageBackend, error) type BufferedStorageBackendConfig struct { BufferSize uint32 `toml:"buffer_size"` @@ -45,7 +45,7 @@ type BufferedStorageBackend struct { } // NewBufferedStorageBackend returns a new BufferedStorageBackend instance. 
-func NewBufferedStorageBackend(ctx context.Context, config BufferedStorageBackendConfig, dataStore datastore.DataStore) (*BufferedStorageBackend, error) { +func NewBufferedStorageBackend(config BufferedStorageBackendConfig, dataStore datastore.DataStore) (*BufferedStorageBackend, error) { if config.BufferSize == 0 { return nil, errors.New("buffer size must be > 0") } diff --git a/ingest/ledgerbackend/buffered_storage_backend_test.go b/ingest/ledgerbackend/buffered_storage_backend_test.go index a64f715947..ca2711c40d 100644 --- a/ingest/ledgerbackend/buffered_storage_backend_test.go +++ b/ingest/ledgerbackend/buffered_storage_backend_test.go @@ -122,14 +122,13 @@ func createLCMBatchReader(start, end, count uint32) io.ReadCloser { } func TestNewBufferedStorageBackend(t *testing.T) { - ctx := context.Background() config := createBufferedStorageBackendConfigForTesting() mockDataStore := new(datastore.MockDataStore) mockDataStore.On("GetSchema").Return(datastore.DataStoreSchema{ LedgersPerFile: uint32(1), FilesPerPartition: partitionSize, }) - bsb, err := NewBufferedStorageBackend(ctx, config, mockDataStore) + bsb, err := NewBufferedStorageBackend(config, mockDataStore) assert.NoError(t, err) assert.Equal(t, bsb.dataStore, mockDataStore) diff --git a/ingest/ledgerbackend/ledger_buffer.go b/ingest/ledgerbackend/ledger_buffer.go index f06f832b87..6965461bba 100644 --- a/ingest/ledgerbackend/ledger_buffer.go +++ b/ingest/ledgerbackend/ledger_buffer.go @@ -83,13 +83,13 @@ func (bsb *BufferedStorageBackend) newLedgerBuffer(ledgerRange Range) (*ledgerBu // but for easier conceptualization, len(taskQueue) can be interpreted as both pending and in-flight tasks // where we assume the workers are empty and not processing any tasks. for i := 0; i <= int(bsb.config.BufferSize); i++ { - ledgerBuffer.pushTaskQueue(ctx) + ledgerBuffer.pushTaskQueue() } return ledgerBuffer, nil } -func (lb *ledgerBuffer) pushTaskQueue(ctx context.Context) { +func (lb *ledgerBuffer) pushTaskQueue() { // In bounded mode, don't queue past the end ledger if lb.nextTaskLedger > lb.ledgerRange.to && lb.ledgerRange.bounded { return @@ -155,7 +155,7 @@ func (lb *ledgerBuffer) worker(ctx context.Context) { // Thus, the number of tasks decreases by 1 and the priority queue length increases by 1. // This keeps the overall total the same (<= BufferSize). As long as the the ledger buffer invariant // was maintained in the previous state, it is still maintained during this state transition. - lb.storeObject(ctx, ledgerObject, sequence) + lb.storeObject(ledgerObject, sequence) break } } @@ -180,7 +180,7 @@ func (lb *ledgerBuffer) downloadLedgerObject(ctx context.Context, sequence uint3 return objectBytes, nil } -func (lb *ledgerBuffer) storeObject(ctx context.Context, ledgerObject []byte, sequence uint32) { +func (lb *ledgerBuffer) storeObject(ledgerObject []byte, sequence uint32) { lb.priorityQueueLock.Lock() defer lb.priorityQueueLock.Unlock() @@ -215,7 +215,7 @@ func (lb *ledgerBuffer) getFromLedgerQueue(ctx context.Context) (xdr.LedgerClose // Thus len(ledgerQueue) decreases by 1 and the number of tasks increases by 1. 
// The overall sum below remains the same: // len(taskQueue) + len(ledgerQueue) + ledgerPriorityQueue.Len() <= bsb.config.BufferSize - lb.pushTaskQueue(ctx) + lb.pushTaskQueue() lcmBatch := xdr.LedgerCloseMetaBatch{} decoder := compressxdr.NewXDRDecoder(compressxdr.DefaultCompressor, &lcmBatch) diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index 69fcd8e209..685a7cea2a 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -300,7 +300,7 @@ func NewSystem(config Config) (System, error) { cancel() return nil, fmt.Errorf("failed to create datastore: %w", err) } - ledgerBackend, err = config.StorageBackendConfig.BufferedStorageBackendFactory(ctx, config.StorageBackendConfig.BufferedStorageBackendConfig, dataStore) + ledgerBackend, err = config.StorageBackendConfig.BufferedStorageBackendFactory(config.StorageBackendConfig.BufferedStorageBackendConfig, dataStore) if err != nil { cancel() return nil, fmt.Errorf("failed to create buffered storage backend: %w", err) diff --git a/services/horizon/internal/ingest/main_test.go b/services/horizon/internal/ingest/main_test.go index c3c278f778..b4ce791eed 100644 --- a/services/horizon/internal/ingest/main_test.go +++ b/services/horizon/internal/ingest/main_test.go @@ -125,7 +125,7 @@ func TestNewSystemBuffered(t *testing.T) { DataStoreFactory: func(ctx context.Context, datastoreConfig datastore.DataStoreConfig) (datastore.DataStore, error) { return mockDataStore, nil }, - BufferedStorageBackendFactory: func(ctx context.Context, config ledgerbackend.BufferedStorageBackendConfig, dataStore datastore.DataStore) (*ledgerbackend.BufferedStorageBackend, error) { + BufferedStorageBackendFactory: func(config ledgerbackend.BufferedStorageBackendConfig, dataStore datastore.DataStore) (*ledgerbackend.BufferedStorageBackend, error) { return bufferedStorageBackend, nil }, }, From 08f3ab29ef263c0b3fa3217368d0dd6c3783185b Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Wed, 17 Jul 2024 10:58:40 -0700 Subject: [PATCH 20/24] #4911: removed NewSystem unit test and related factory creation methods for DataStore, BufferedStorageBackend, review feedback --- .../ledgerbackend/buffered_storage_backend.go | 2 -- services/horizon/cmd/db.go | 4 --- services/horizon/internal/ingest/main.go | 10 +++---- services/horizon/internal/ingest/main_test.go | 27 ------------------- support/datastore/datastore.go | 2 -- 5 files changed, 4 insertions(+), 41 deletions(-) diff --git a/ingest/ledgerbackend/buffered_storage_backend.go b/ingest/ledgerbackend/buffered_storage_backend.go index e7fcdc86f4..aa70336295 100644 --- a/ingest/ledgerbackend/buffered_storage_backend.go +++ b/ingest/ledgerbackend/buffered_storage_backend.go @@ -17,8 +17,6 @@ import ( // Ensure BufferedStorageBackend implements LedgerBackend var _ LedgerBackend = (*BufferedStorageBackend)(nil) -type BufferedStorageBackendFactory func(config BufferedStorageBackendConfig, dataStore datastore.DataStore) (*BufferedStorageBackend, error) - type BufferedStorageBackendConfig struct { BufferSize uint32 `toml:"buffer_size"` NumWorkers uint32 `toml:"num_workers"` diff --git a/services/horizon/cmd/db.go b/services/horizon/cmd/db.go index c0d6c8c8ac..92a732e002 100644 --- a/services/horizon/cmd/db.go +++ b/services/horizon/cmd/db.go @@ -15,14 +15,12 @@ import ( "github.com/spf13/cobra" "github.com/spf13/viper" - "github.com/stellar/go/ingest/ledgerbackend" horizon "github.com/stellar/go/services/horizon/internal" 
"github.com/stellar/go/services/horizon/internal/db2/history" "github.com/stellar/go/services/horizon/internal/db2/schema" "github.com/stellar/go/services/horizon/internal/ingest" "github.com/stellar/go/support/config" support "github.com/stellar/go/support/config" - "github.com/stellar/go/support/datastore" "github.com/stellar/go/support/db" "github.com/stellar/go/support/errors" hlog "github.com/stellar/go/support/log" @@ -645,8 +643,6 @@ func loadStorageBackendConfig(storageBackendConfigPath string) (ingest.StorageBa return ingest.StorageBackendConfig{}, fmt.Errorf("error unmarshalling datastore ledgerbackend TOML config: %w", err) } - storageBackendConfig.BufferedStorageBackendFactory = ledgerbackend.NewBufferedStorageBackend - storageBackendConfig.DataStoreFactory = datastore.NewDataStore return storageBackendConfig, nil } diff --git a/services/horizon/internal/ingest/main.go b/services/horizon/internal/ingest/main.go index 685a7cea2a..c9eec2f2f6 100644 --- a/services/horizon/internal/ingest/main.go +++ b/services/horizon/internal/ingest/main.go @@ -101,10 +101,8 @@ func (s LedgerBackendType) String() string { } type StorageBackendConfig struct { - DataStoreConfig datastore.DataStoreConfig `toml:"datastore_config"` - DataStoreFactory datastore.DataStoreFactory - BufferedStorageBackendConfig ledgerbackend.BufferedStorageBackendConfig `toml:"buffered_storage_backend_config"` - BufferedStorageBackendFactory ledgerbackend.BufferedStorageBackendFactory + DataStoreConfig datastore.DataStoreConfig `toml:"datastore_config"` + BufferedStorageBackendConfig ledgerbackend.BufferedStorageBackendConfig `toml:"buffered_storage_backend_config"` } type Config struct { @@ -295,12 +293,12 @@ func NewSystem(config Config) (System, error) { if config.LedgerBackendType == BufferedStorageBackend { // Ingest from datastore var dataStore datastore.DataStore - dataStore, err = config.StorageBackendConfig.DataStoreFactory(context.Background(), config.StorageBackendConfig.DataStoreConfig) + dataStore, err = datastore.NewDataStore(context.Background(), config.StorageBackendConfig.DataStoreConfig) if err != nil { cancel() return nil, fmt.Errorf("failed to create datastore: %w", err) } - ledgerBackend, err = config.StorageBackendConfig.BufferedStorageBackendFactory(config.StorageBackendConfig.BufferedStorageBackendConfig, dataStore) + ledgerBackend, err = ledgerbackend.NewBufferedStorageBackend(config.StorageBackendConfig.BufferedStorageBackendConfig, dataStore) if err != nil { cancel() return nil, fmt.Errorf("failed to create buffered storage backend: %w", err) diff --git a/services/horizon/internal/ingest/main_test.go b/services/horizon/internal/ingest/main_test.go index b4ce791eed..4f0e220ebe 100644 --- a/services/horizon/internal/ingest/main_test.go +++ b/services/horizon/internal/ingest/main_test.go @@ -17,7 +17,6 @@ import ( "github.com/stellar/go/ingest" "github.com/stellar/go/ingest/ledgerbackend" "github.com/stellar/go/services/horizon/internal/db2/history" - "github.com/stellar/go/support/datastore" "github.com/stellar/go/support/db" "github.com/stellar/go/support/errors" logpkg "github.com/stellar/go/support/log" @@ -111,32 +110,6 @@ func TestNewSystem(t *testing.T) { assert.Equal(t, system.maxLedgerPerFlush, MaxLedgersPerFlush) } -func TestNewSystemBuffered(t *testing.T) { - mockDataStore := &datastore.MockDataStore{} - bufferedStorageBackend := &ledgerbackend.BufferedStorageBackend{} - config := Config{ - HistorySession: &db.Session{DB: &sqlx.DB{}}, - HistoryArchiveURLs: 
[]string{"https://history.stellar.org/prd/core-live/core_live_001"}, - CheckpointFrequency: 64, - LedgerBackendType: BufferedStorageBackend, - StorageBackendConfig: StorageBackendConfig{ - DataStoreConfig: datastore.DataStoreConfig{Type: "GCS", Params: map[string]string{"destination_bucket_path": "Test"}}, - BufferedStorageBackendConfig: ledgerbackend.BufferedStorageBackendConfig{NumWorkers: 1, BufferSize: 2}, - DataStoreFactory: func(ctx context.Context, datastoreConfig datastore.DataStoreConfig) (datastore.DataStore, error) { - return mockDataStore, nil - }, - BufferedStorageBackendFactory: func(config ledgerbackend.BufferedStorageBackendConfig, dataStore datastore.DataStore) (*ledgerbackend.BufferedStorageBackend, error) { - return bufferedStorageBackend, nil - }, - }, - } - - sIface, err := NewSystem(config) - assert.NoError(t, err) - system := sIface.(*system) - assert.Same(t, system.ledgerBackend, bufferedStorageBackend) -} - // Custom comparator function.This function is needed because structs in Go that contain function fields // cannot be directly compared using assert.Equal, so here we compare each individual field, skipping the function fields. func CompareConfigs(t *testing.T, expected, actual Config) bool { diff --git a/support/datastore/datastore.go index 72cb52d65b..961ba99545 100644 --- a/support/datastore/datastore.go +++ b/support/datastore/datastore.go @@ -13,8 +13,6 @@ type DataStoreConfig struct { Schema DataStoreSchema `toml:"schema"` } -type DataStoreFactory func(ctx context.Context, datastoreConfig DataStoreConfig) (DataStore, error) - // DataStore defines an interface for interacting with data storage type DataStore interface { GetFileMetadata(ctx context.Context, path string) (map[string]string, error) From 026d41e795f957210d74663067a71f5895b7d183 Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Tue, 16 Jul 2024 20:55:38 -0700 Subject: [PATCH 21/24] #4911: refactor integration.NewTest to allow running only the web process with a rw db, migrated TestReingestDatastore to it --- .../horizon/internal/integration/db_test.go | 97 ++---------- .../internal/integration/parameters_test.go | 58 ++++---- .../internal/test/integration/integration.go | 138 +++++++++++------- 3 files changed, 126 insertions(+), 167 deletions(-) diff --git a/services/horizon/internal/integration/db_test.go b/services/horizon/internal/integration/db_test.go index bcc6f3ebb0..5a2b03e48b 100644 --- a/services/horizon/internal/integration/db_test.go +++ b/services/horizon/internal/integration/db_test.go @@ -4,7 +4,6 @@ import ( "context" "fmt" "io/fs" - "net" "os" "path/filepath" "strconv" @@ -12,15 +11,12 @@ import ( "time" "github.com/fsouza/fake-gcs-server/fakestorage" - "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/stellar/go/clients/horizonclient" - sdk "github.com/stellar/go/clients/horizonclient" "github.com/stellar/go/historyarchive" "github.com/stellar/go/keypair" - "github.com/stellar/go/network" hProtocol "github.com/stellar/go/protocols/horizon" horizoncmd "github.com/stellar/go/services/horizon/cmd" horizon "github.com/stellar/go/services/horizon/internal" @@ -33,7 +29,6 @@ import ( "github.com/stellar/go/support/db/dbtest" "github.com/stellar/go/txnbuild" "github.com/stellar/go/xdr" - "github.com/stellar/throttled" ) func submitLiquidityPoolOps(itest *integration.Test, tt *assert.Assertions) (submittedOperations []txnbuild.Operation, lastLedger int32) { @@ -561,22 +556,19 @@ func TestReingestDB(t *testing.T) { } func 
TestReingestDatastore(t *testing.T) { - if os.Getenv("HORIZON_INTEGRATION_TESTS_ENABLED") == "" { - t.Skip("skipping integration test: HORIZON_INTEGRATION_TESTS_ENABLED not set") - } - - newDB := dbtest.Postgres(t) - defer newDB.Close() - var rootCmd = horizoncmd.NewRootCmd() - rootCmd.SetArgs([]string{ - "db", "migrate", "up", "--db-url", newDB.DSN}) - require.NoError(t, rootCmd.Execute()) + test := integration.NewTest(t, integration.Config{ + SkipHorizonStart: true, + SkipCoreContainerCreation: true, + }) + err := test.StartHorizon(false) + assert.NoError(t, err) + test.WaitForHorizonWeb() testTempDir := t.TempDir() fakeBucketFilesSource := "testdata/testbucket" fakeBucketFiles := []fakestorage.Object{} - if err := filepath.WalkDir(fakeBucketFilesSource, func(path string, entry fs.DirEntry, err error) error { + if err = filepath.WalkDir(fakeBucketFilesSource, func(path string, entry fs.DirEntry, err error) error { if err != nil { return err } @@ -621,11 +613,11 @@ func TestReingestDatastore(t *testing.T) { t.Logf("fake gcs server started at %v", gcsServer.URL()) t.Setenv("STORAGE_EMULATOR_HOST", gcsServer.URL()) - rootCmd = horizoncmd.NewRootCmd() + rootCmd := horizoncmd.NewRootCmd() rootCmd.SetArgs([]string{"db", "reingest", "range", - "--db-url", newDB.DSN, + "--db-url", test.GetTestDB().DSN, "--network", "testnet", "--parallel-workers", "1", "--ledgerbackend", "datastore", @@ -635,73 +627,10 @@ func TestReingestDatastore(t *testing.T) { require.NoError(t, rootCmd.Execute()) - listener, webApp, webPort, err := dynamicHorizonWeb(newDB.DSN) - if err != nil { - t.Fatalf("couldn't create and start horizon web app on dynamic port %v", err) - } - - webAppDone := make(chan struct{}) - go func() { - defer close(webAppDone) - if err = listener.Close(); err != nil { - return - } - webApp.Serve() - }() - - defer func() { - webApp.Close() - select { - case <-webAppDone: - return - default: - } - }() - - horizonClient := &sdk.Client{ - HorizonURL: fmt.Sprintf("http://localhost:%v", webPort), - } - - // wait until the web server is up before continuing to test requests - require.Eventually(t, func() bool { - if _, horizonErr := horizonClient.Root(); horizonErr != nil { - return false - } - return true - }, time.Second*15, time.Millisecond*100) - - _, err = horizonClient.LedgerDetail(998) + _, err = test.Client().LedgerDetail(998) require.NoError(t, err) } -func dynamicHorizonWeb(dsn string) (net.Listener, *horizon.App, uint, error) { - listener, err := net.Listen("tcp", "localhost:0") - if err != nil { - return nil, nil, 0, err - } - webPort := uint(listener.Addr().(*net.TCPAddr).Port) - - webApp, err := horizon.NewApp(horizon.Config{ - DatabaseURL: dsn, - Port: webPort, - NetworkPassphrase: network.TestNetworkPassphrase, - LogLevel: logrus.InfoLevel, - DisableTxSub: true, - Ingest: false, - ConnectionTimeout: 10 * time.Second, - RateQuota: &throttled.RateQuota{ - MaxRate: throttled.PerHour(1000), - MaxBurst: 100, - }, - }) - if err != nil { - listener.Close() - return nil, nil, 0, err - } - - return listener, webApp, webPort, nil -} - func TestReingestDBWithFilterRules(t *testing.T) { itest, _ := initializeDBIntegrationTest(t) tt := assert.New(t) @@ -833,7 +762,7 @@ func TestReingestDBWithFilterRules(t *testing.T) { }() // wait until the web server is up before continuing to test requests - itest.WaitForHorizon() + itest.WaitForHorizonIngest() // Make sure that a tx from non-whitelisted account is not stored after reingestion _, err = itest.Client().TransactionDetail(nonWhiteListTxResp.Hash) @@ 
-1056,7 +985,7 @@ func TestResumeFromInitializedDB(t *testing.T) { tt := assert.New(t) // Stop the integration test, and restart it with the same database - err := itest.RestartHorizon() + err := itest.RestartHorizon(true) tt.NoError(err) successfullyResumed := func() bool { diff --git a/services/horizon/internal/integration/parameters_test.go b/services/horizon/internal/integration/parameters_test.go index 333ed744c6..3d131ba77d 100644 --- a/services/horizon/internal/integration/parameters_test.go +++ b/services/horizon/internal/integration/parameters_test.go @@ -76,7 +76,7 @@ func TestBucketDirDisallowed(t *testing.T) { horizon.StellarCoreBinaryPathName: os.Getenv("CAPTIVE_CORE_BIN"), } test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.Equal(t, err.Error(), integration.HorizonInitErrStr+": error generating captive core configuration:"+ " invalid captive core toml file: could not unmarshal captive core toml: setting BUCKET_DIR_PATH is disallowed"+ " for Captive Core, use CAPTIVE_CORE_STORAGE_PATH instead") @@ -110,9 +110,9 @@ func TestEnvironmentPreserved(t *testing.T) { } test := integration.NewTest(t, *testConfig) - err = test.StartHorizon() + err = test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() envValue := os.Getenv("STELLAR_CORE_URL") assert.Equal(t, StellarCoreURL, envValue) @@ -160,7 +160,7 @@ func TestInvalidNetworkParameters(t *testing.T) { testConfig.SkipCoreContainerCreation = true testConfig.HorizonIngestParameters = localParams test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) // Adding sleep as a workaround for the race condition in the ingestion system. // https://github.com/stellar/go/issues/5005 time.Sleep(2 * time.Second) @@ -204,7 +204,7 @@ func TestNetworkParameter(t *testing.T) { testConfig.SkipCoreContainerCreation = true testConfig.HorizonIngestParameters = localParams test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) // Adding sleep as a workaround for the race condition in the ingestion system. // https://github.com/stellar/go/issues/5005 time.Sleep(2 * time.Second) @@ -247,7 +247,7 @@ func TestNetworkEnvironmentVariable(t *testing.T) { testConfig.HorizonIngestParameters = networkParamArgs testConfig.HorizonEnvironment = map[string]string{"NETWORK": networkValue} test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) // Adding sleep here as a workaround for the race condition in the ingestion system. 
// More details can be found at https://github.com/stellar/go/issues/5005 time.Sleep(2 * time.Second) @@ -270,9 +270,9 @@ func TestCaptiveCoreConfigFilesystemState(t *testing.T) { testConfig.HorizonIngestParameters = localParams test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() t.Run("disk state", func(t *testing.T) { validateCaptiveCoreDiskState(test, storagePath) @@ -286,9 +286,9 @@ func TestCaptiveCoreConfigFilesystemState(t *testing.T) { func TestMaxAssetsForPathRequests(t *testing.T) { t.Run("default", func(t *testing.T) { test := integration.NewTest(t, *integration.GetTestConfig()) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() assert.Equal(t, test.HorizonIngest().Config().MaxAssetsPerPathRequest, 15) test.Shutdown() }) @@ -296,9 +296,9 @@ func TestMaxAssetsForPathRequests(t *testing.T) { testConfig := integration.GetTestConfig() testConfig.HorizonIngestParameters = map[string]string{"max-assets-per-path-request": "2"} test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() assert.Equal(t, test.HorizonIngest().Config().MaxAssetsPerPathRequest, 2) test.Shutdown() }) @@ -307,9 +307,9 @@ func TestMaxAssetsForPathRequests(t *testing.T) { func TestMaxPathFindingRequests(t *testing.T) { t.Run("default", func(t *testing.T) { test := integration.NewTest(t, *integration.GetTestConfig()) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() assert.Equal(t, test.HorizonIngest().Config().MaxPathFindingRequests, uint(0)) _, ok := test.HorizonIngest().Paths().(simplepath.InMemoryFinder) assert.True(t, ok) @@ -319,9 +319,9 @@ func TestMaxPathFindingRequests(t *testing.T) { testConfig := integration.GetTestConfig() testConfig.HorizonIngestParameters = map[string]string{"max-path-finding-requests": "5"} test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() assert.Equal(t, test.HorizonIngest().Config().MaxPathFindingRequests, uint(5)) finder, ok := test.HorizonIngest().Paths().(*paths.RateLimitedFinder) assert.True(t, ok) @@ -333,9 +333,9 @@ func TestMaxPathFindingRequests(t *testing.T) { func TestDisablePathFinding(t *testing.T) { t.Run("default", func(t *testing.T) { test := integration.NewTest(t, *integration.GetTestConfig()) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() assert.Equal(t, test.HorizonIngest().Config().MaxPathFindingRequests, uint(0)) _, ok := test.HorizonIngest().Paths().(simplepath.InMemoryFinder) assert.True(t, ok) @@ -345,9 +345,9 @@ func TestDisablePathFinding(t *testing.T) { testConfig := integration.GetTestConfig() testConfig.HorizonIngestParameters = map[string]string{"disable-path-finding": "true"} test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() assert.Nil(t, test.HorizonIngest().Paths()) test.Shutdown() }) @@ -364,7 +364,7 @@ func TestDisableTxSub(t *testing.T) { 
testConfig.HorizonIngestParameters = localParams testConfig.SkipCoreContainerCreation = true test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.ErrorContains(t, err, "cannot initialize Horizon: flag --stellar-core-url cannot be empty") test.Shutdown() }) @@ -379,7 +379,7 @@ func TestDisableTxSub(t *testing.T) { testConfig.HorizonIngestParameters = localParams testConfig.SkipCoreContainerCreation = true test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) test.Shutdown() }) @@ -390,9 +390,9 @@ func TestDisableTxSub(t *testing.T) { "ingest": "true", } test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() test.Shutdown() }) t.Run("do not require stellar-core-url when both DISABLE_TX_SUB=true and INGEST=false", func(t *testing.T) { @@ -405,7 +405,7 @@ func TestDisableTxSub(t *testing.T) { testConfig.HorizonIngestParameters = localParams testConfig.SkipCoreContainerCreation = true test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) test.Shutdown() }) @@ -421,9 +421,9 @@ func TestDeprecatedOutputs(t *testing.T) { testConfig := integration.GetTestConfig() testConfig.HorizonIngestParameters = map[string]string{"exp-enable-ingestion-filtering": "false"} test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() // Use a wait group to wait for the goroutine to finish before proceeding var wg sync.WaitGroup @@ -507,9 +507,9 @@ func TestDeprecatedOutputs(t *testing.T) { testConfig := integration.GetTestConfig() testConfig.HorizonIngestParameters = map[string]string{"captive-core-use-db": "true"} test := integration.NewTest(t, *testConfig) - err := test.StartHorizon() + err := test.StartHorizon(true) assert.NoError(t, err) - test.WaitForHorizon() + test.WaitForHorizonIngest() // Use a wait group to wait for the goroutine to finish before proceeding var wg sync.WaitGroup diff --git a/services/horizon/internal/test/integration/integration.go b/services/horizon/internal/test/integration/integration.go index 0402301a44..a4fcf34469 100644 --- a/services/horizon/internal/test/integration/integration.go +++ b/services/horizon/internal/test/integration/integration.go @@ -23,11 +23,13 @@ import ( "github.com/creachadair/jrpc2/jhttp" "github.com/spf13/cobra" "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" sdk "github.com/stellar/go/clients/horizonclient" "github.com/stellar/go/clients/stellarcore" "github.com/stellar/go/keypair" proto "github.com/stellar/go/protocols/horizon" + horizoncmd "github.com/stellar/go/services/horizon/cmd" horizon "github.com/stellar/go/services/horizon/internal" "github.com/stellar/go/services/horizon/internal/ingest" "github.com/stellar/go/support/config" @@ -91,6 +93,7 @@ type Test struct { coreConfig CaptiveConfig horizonIngestConfig horizon.Config horizonWebConfig horizon.Config + testDB *dbtest.DB environment *test.EnvironmentManager horizonClient *sdk.Client @@ -170,11 +173,11 @@ func NewTest(t *testing.T, config Config) *Test { } if !config.SkipHorizonStart { - if innerErr := i.StartHorizon(); innerErr != nil { + if innerErr := i.StartHorizon(true); innerErr != nil { t.Fatalf("Failed to 
start Horizon: %v", innerErr) } - i.WaitForHorizon() + i.WaitForHorizonIngest() } return i @@ -297,14 +300,15 @@ func (i *Test) prepareShutdownHandlers() { }() } -func (i *Test) RestartHorizon() error { +// if startIngestProcess=true, will restart the ingest sub process also +func (i *Test) RestartHorizon(restartIngestProcess bool) error { i.StopHorizon() - if err := i.StartHorizon(); err != nil { + if err := i.StartHorizon(restartIngestProcess); err != nil { return err } - i.WaitForHorizon() + i.WaitForHorizonIngest() return nil } @@ -316,6 +320,10 @@ func (i *Test) GetHorizonWebConfig() horizon.Config { return i.horizonWebConfig } +func (i *Test) GetTestDB() *dbtest.DB { + return i.testDB +} + // Shutdown stops the integration tests and destroys all its associated // resources. It will be implicitly called when the calling test (i.e. the // `testing.Test` passed to `New()`) is finished if it hasn't been explicitly @@ -329,68 +337,80 @@ func (i *Test) Shutdown() { }) } -// StartHorizon initializes and starts the Horizon client-facing API server and the ingest server. -func (i *Test) StartHorizon() error { - postgres := dbtest.Postgres(i.t) +// StartHorizon initializes and starts the Horizon client-facing API server. +// When startIngestProcess=true, start a second process for ingest server +func (i *Test) StartHorizon(startIngestProcess bool) error { + i.testDB = dbtest.Postgres(i.t) i.shutdownCalls = append(i.shutdownCalls, func() { i.StopHorizon() - postgres.Close() + i.testDB.Close() }) + var err error + + if err = i.initializeEnvironmentVariables(); err != nil { + return err + } // To facilitate custom runs of Horizon, we merge a default set of // parameters with the tester-supplied ones (if any). - mergedWebArgs := MergeMaps(i.getDefaultWebArgs(postgres), i.config.HorizonWebParameters) - webArgs := mapToFlags(mergedWebArgs) - i.t.Log("Horizon command line webArgs:", webArgs) + mergedWebArgs := MergeMaps(i.getDefaultWebArgs(), i.config.HorizonWebParameters) + mergedIngestArgs := MergeMaps(i.getDefaultIngestArgs(), i.config.HorizonIngestParameters) - mergedIngestArgs := MergeMaps(i.getDefaultIngestArgs(postgres), i.config.HorizonIngestParameters) - ingestArgs := mapToFlags(mergedIngestArgs) - i.t.Log("Horizon command line ingestArgs:", ingestArgs) + // Set up Horizon clients + i.setupHorizonClient(mergedWebArgs) + if err = i.setupHorizonAdminClient(mergedIngestArgs); err != nil { + return err + } - // setup Horizon web command - var err error + // setup Horizon web process + webArgs := mapToFlags(mergedWebArgs) + i.t.Log("Horizon command line webArgs:", webArgs) webConfig, webConfigOpts := horizon.Flags() webCmd := i.createWebCommand(webConfig, webConfigOpts) webCmd.SetArgs(webArgs) if err = webConfigOpts.Init(webCmd); err != nil { return errors.Wrap(err, "cannot initialize params") } - - // setup Horizon ingest command - ingestConfig, ingestConfigOpts := horizon.Flags() - ingestCmd := i.createIngestCommand(ingestConfig, ingestConfigOpts) - ingestCmd.SetArgs(ingestArgs) - if err = ingestConfigOpts.Init(ingestCmd); err != nil { - return errors.Wrap(err, "cannot initialize params") - } - - if err = i.initializeEnvironmentVariables(); err != nil { - return err - } - - if err = ingestCmd.Execute(); err != nil { - return errors.Wrap(err, HorizonInitErrStr) - } - if err = webCmd.Execute(); err != nil { return errors.Wrap(err, HorizonInitErrStr) } + i.horizonWebConfig = *webConfig - // Set up Horizon clients - i.setupHorizonClient(mergedWebArgs) - if err = 
i.setupHorizonAdminClient(mergedIngestArgs); err != nil { return err + // setup horizon ingest process + if startIngestProcess { + ingestArgs := mapToFlags(mergedIngestArgs) + i.t.Log("Horizon command line ingestArgs:", ingestArgs) + // setup Horizon ingest command + ingestConfig, ingestConfigOpts := horizon.Flags() + ingestCmd := i.createIngestCommand(ingestConfig, ingestConfigOpts) + ingestCmd.SetArgs(ingestArgs) + if err = ingestConfigOpts.Init(ingestCmd); err != nil { + return errors.Wrap(err, "cannot initialize params") + } + if err = ingestCmd.Execute(); err != nil { + return errors.Wrap(err, HorizonInitErrStr) + } + i.horizonIngestConfig = *ingestConfig + } else { + // not running ingestion, normally that process would run migrations through --apply-migrations, + // so migrate the empty test DB directly here instead + var rootCmd = horizoncmd.NewRootCmd() + rootCmd.SetArgs([]string{ + "db", "migrate", "up", "--db-url", i.testDB.DSN}) + require.NoError(i.t, rootCmd.Execute()) } - i.horizonIngestConfig = *ingestConfig - i.horizonWebConfig = *webConfig - i.appStopped = &sync.WaitGroup{} - i.appStopped.Add(2) - go func() { - _ = i.ingestNode.Serve() - i.appStopped.Done() - }() + + if i.ingestNode != nil { + i.appStopped.Add(1) + go func() { + _ = i.ingestNode.Serve() + i.appStopped.Done() + }() + } + + i.appStopped.Add(1) go func() { _ = i.webNode.Serve() i.appStopped.Done() @@ -399,13 +419,13 @@ func (i *Test) getDefaultArgs(postgres *dbtest.DB) map[string]string { +func (i *Test) getDefaultArgs() map[string]string { // TODO: Ideally, we'd be pulling host/port information from the Docker // Compose YAML file itself rather than hardcoding it. return map[string]string{ "ingest": "false", "history-archive-urls": fmt.Sprintf("http://%s:%d", "localhost", historyArchivePort), - "db-url": postgres.RO_DSN, + "db-url": i.testDB.RO_DSN, "stellar-core-url": i.coreClient.URL, "network-passphrase": i.passPhrase, "apply-migrations": "true", @@ -417,15 +437,15 @@ } } -func (i *Test) getDefaultWebArgs(postgres *dbtest.DB) map[string]string { - return MergeMaps(i.getDefaultArgs(postgres), map[string]string{"admin-port": "0"}) +func (i *Test) getDefaultWebArgs() map[string]string { + return MergeMaps(i.getDefaultArgs(), map[string]string{"admin-port": "0"}) } -func (i *Test) getDefaultIngestArgs(postgres *dbtest.DB) map[string]string { - return MergeMaps(i.getDefaultArgs(postgres), map[string]string{ +func (i *Test) getDefaultIngestArgs() map[string]string { + return MergeMaps(i.getDefaultArgs(), map[string]string{ "admin-port": strconv.Itoa(i.AdminPort()), "port": "8001", - "db-url": postgres.DSN, + "db-url": i.testDB.DSN, "stellar-core-binary-path": i.coreConfig.binaryPath, "captive-core-config-path": i.coreConfig.configPath, "captive-core-http-port": "21626", @@ -816,7 +836,17 @@ func (i *Test) UpgradeProtocol(version uint32) { i.t.Fatalf("could not upgrade protocol in 10s") } -func (i *Test) WaitForHorizon() { +func (i *Test) WaitForHorizonWeb() { + // wait until the web server is up before continuing to test requests + require.Eventually(i.t, func() bool { + if _, horizonErr := i.Client().Root(); horizonErr != nil { + return false + } + return true + }, time.Second*15, time.Millisecond*100) +} + +func (i *Test) WaitForHorizonIngest() { for t := 60; t >= 0; t -= 1 { time.Sleep(time.Second)
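For reference before the remaining test-harness patches: the file that --datastore-config points at is plain TOML that loadStorageBackendConfig unmarshals into ingest.StorageBackendConfig. The following is a minimal self-contained sketch, not code from this PR. The table names and the buffer_size, num_workers, retry_limit, and schema keys mirror the toml tags visible in the patches above, while the type, params, ledgers_per_file, and files_per_partition key names and all values are assumptions for illustration only.

package main

import (
	"fmt"
	"log"

	"github.com/pelletier/go-toml"
)

// Local stand-ins for the config structs, mirroring the toml tags from the
// patches above; declared here only to keep the sketch self-contained.
type dataStoreSchema struct {
	LedgersPerFile    uint32 `toml:"ledgers_per_file"`    // assumed key name
	FilesPerPartition uint32 `toml:"files_per_partition"` // assumed key name
}

type dataStoreConfig struct {
	Type   string            `toml:"type"`   // assumed key name
	Params map[string]string `toml:"params"` // assumed key name
	Schema dataStoreSchema   `toml:"schema"`
}

type bufferedStorageBackendConfig struct {
	BufferSize uint32 `toml:"buffer_size"`
	NumWorkers uint32 `toml:"num_workers"`
	RetryLimit uint32 `toml:"retry_limit"`
	// retry_wait (a time.Duration in the real struct) is left at its
	// zero value in this sketch.
}

type storageBackendConfig struct {
	DataStoreConfig              dataStoreConfig              `toml:"datastore_config"`
	BufferedStorageBackendConfig bufferedStorageBackendConfig `toml:"buffered_storage_backend_config"`
}

// A hypothetical config file body. The bucket path and numbers are only
// illustrative; 1 ledger per file with 64000 files per partition is chosen
// to match the FFFFFFFF--0-63999 object naming used by the fake GCS bucket
// in db_test.go above.
const sample = `
[datastore_config]
type = "GCS"

[datastore_config.params]
destination_bucket_path = "path/to/my/bucket"

[datastore_config.schema]
ledgers_per_file = 1
files_per_partition = 64000

[buffered_storage_backend_config]
buffer_size = 100
num_workers = 5
retry_limit = 3
`

func main() {
	// Same go-toml calls as loadStorageBackendConfig, just from a string
	// instead of toml.LoadFile on a path.
	tree, err := toml.Load(sample)
	if err != nil {
		log.Fatal(err)
	}
	var cfg storageBackendConfig
	if err := tree.Unmarshal(&cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", cfg)
}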
From 1642a1ae76ea12c8a22b913f135f38957f5ff60e Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Wed, 17 Jul 2024 17:32:22 -0700 Subject: [PATCH 22/24] #4911: fix integration test StartHorizon usage when cmd exec errors intentionally, make sure the db conn is closed regardless during test shutdown --- .../internal/integration/parameters_test.go | 27 ------------------- .../internal/test/integration/integration.go | 13 ++++++++- 2 files changed, 12 insertions(+), 28 deletions(-) diff --git a/services/horizon/internal/integration/parameters_test.go b/services/horizon/internal/integration/parameters_test.go index 3d131ba77d..91e16ce186 100644 --- a/services/horizon/internal/integration/parameters_test.go +++ b/services/horizon/internal/integration/parameters_test.go @@ -12,7 +12,6 @@ import ( "strings" "sync" "testing" - "time" "github.com/spf13/cobra" @@ -80,9 +79,6 @@ func TestBucketDirDisallowed(t *testing.T) { assert.Equal(t, err.Error(), integration.HorizonInitErrStr+": error generating captive core configuration:"+ " invalid captive core toml file: could not unmarshal captive core toml: setting BUCKET_DIR_PATH is disallowed"+ " for Captive Core, use CAPTIVE_CORE_STORAGE_PATH instead") - time.Sleep(1 * time.Second) - test.StopHorizon() - test.Shutdown() } func TestEnvironmentPreserved(t *testing.T) { @@ -117,8 +113,6 @@ func TestEnvironmentPreserved(t *testing.T) { envValue := os.Getenv("STELLAR_CORE_URL") assert.Equal(t, StellarCoreURL, envValue) - test.Shutdown() - envValue = os.Getenv("STELLAR_CORE_URL") assert.Equal(t, "original value", envValue) } @@ -161,11 +155,7 @@ func TestInvalidNetworkParameters(t *testing.T) { testConfig.HorizonIngestParameters = localParams test := integration.NewTest(t, *testConfig) err := test.StartHorizon(true) - // Adding sleep as a workaround for the race condition in the ingestion system. - // https://github.com/stellar/go/issues/5005 - time.Sleep(2 * time.Second) assert.Equal(t, testCase.errMsg, err.Error()) - test.Shutdown() }) } } @@ -205,14 +195,9 @@ func TestNetworkParameter(t *testing.T) { testConfig.HorizonIngestParameters = localParams test := integration.NewTest(t, *testConfig) err := test.StartHorizon(true) - // Adding sleep as a workaround for the race condition in the ingestion system. - // https://github.com/stellar/go/issues/5005 - time.Sleep(2 * time.Second) assert.NoError(t, err) assert.Equal(t, test.GetHorizonIngestConfig().HistoryArchiveURLs, tt.historyArchiveURLs) assert.Equal(t, test.GetHorizonIngestConfig().NetworkPassphrase, tt.networkPassphrase) - - test.Shutdown() }) } } @@ -248,11 +233,7 @@ func TestNetworkEnvironmentVariable(t *testing.T) { testConfig.HorizonEnvironment = map[string]string{"NETWORK": networkValue} test := integration.NewTest(t, *testConfig) err := test.StartHorizon(true) - // Adding sleep here as a workaround for the race condition in the ingestion system. 
- // More details can be found at https://github.com/stellar/go/issues/5005 - time.Sleep(2 * time.Second) assert.NoError(t, err) - test.Shutdown() }) } } @@ -300,7 +281,6 @@ func TestMaxAssetsForPathRequests(t *testing.T) { assert.NoError(t, err) test.WaitForHorizonIngest() assert.Equal(t, test.HorizonIngest().Config().MaxAssetsPerPathRequest, 2) - test.Shutdown() }) } @@ -326,7 +306,6 @@ func TestMaxPathFindingRequests(t *testing.T) { finder, ok := test.HorizonIngest().Paths().(*paths.RateLimitedFinder) assert.True(t, ok) assert.Equal(t, finder.Limit(), 5) - test.Shutdown() }) } @@ -339,7 +318,6 @@ func TestDisablePathFinding(t *testing.T) { assert.Equal(t, test.HorizonIngest().Config().MaxPathFindingRequests, uint(0)) _, ok := test.HorizonIngest().Paths().(simplepath.InMemoryFinder) assert.True(t, ok) - test.Shutdown() }) t.Run("set to true", func(t *testing.T) { testConfig := integration.GetTestConfig() @@ -349,7 +327,6 @@ assert.NoError(t, err) test.WaitForHorizonIngest() assert.Nil(t, test.HorizonIngest().Paths()) - test.Shutdown() }) } @@ -366,7 +343,6 @@ func TestDisableTxSub(t *testing.T) { test := integration.NewTest(t, *testConfig) err := test.StartHorizon(true) assert.ErrorContains(t, err, "cannot initialize Horizon: flag --stellar-core-url cannot be empty") - test.Shutdown() }) t.Run("horizon starts successfully when DISABLE_TX_SUB=false, INGEST=false and stellar-core-url is provided", func(t *testing.T) { localParams := integration.MergeMaps(networkParamArgs, map[string]string{ @@ -381,7 +357,6 @@ test := integration.NewTest(t, *testConfig) err := test.StartHorizon(true) assert.NoError(t, err) - test.Shutdown() }) t.Run("horizon starts successfully when DISABLE_TX_SUB=true and INGEST=true", func(t *testing.T) { testConfig := integration.GetTestConfig() @@ -393,7 +368,6 @@ err := test.StartHorizon(true) assert.NoError(t, err) test.WaitForHorizonIngest() - test.Shutdown() }) t.Run("do not require stellar-core-url when both DISABLE_TX_SUB=true and INGEST=false", func(t *testing.T) { localParams := integration.MergeMaps(networkParamArgs, map[string]string{ @@ -407,7 +381,6 @@ test := integration.NewTest(t, *testConfig) err := test.StartHorizon(true) assert.NoError(t, err) - test.Shutdown() }) } diff --git a/services/horizon/internal/test/integration/integration.go b/services/horizon/internal/test/integration/integration.go index a4fcf34469..ca99fa4c06 100644 --- a/services/horizon/internal/test/integration/integration.go +++ b/services/horizon/internal/test/integration/integration.go @@ -342,6 +342,18 @@ func (i *Test) Shutdown() { func (i *Test) StartHorizon(startIngestProcess bool) error { i.testDB = dbtest.Postgres(i.t) i.shutdownCalls = append(i.shutdownCalls, func() { + if i.appStopped == nil { + // appStopped is nil when horizon's cmd.Execute created an App but hit an intentional error, so StartHorizon + // never reached App.Serve(), which would otherwise have wired a listener to the App.Close() invocation and + // eventually closed the db conn; we must detect this edge case manually and + // close the app's db here to clean up + if i.webNode != nil { + i.webNode.CloseDB() + } + if i.ingestNode != nil { + i.ingestNode.CloseDB() + } + } i.StopHorizon() i.testDB.Close() }) @@ -401,7 +413,6 @@ func (i *Test) StartHorizon(startIngestProcess bool) error { } i.appStopped = &sync.WaitGroup{} - if i.ingestNode 
!= nil { i.appStopped.Add(1) go func() { From 38f2b6daca2998fa197bf91f7a220a0988f9f9c6 Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Wed, 17 Jul 2024 18:45:55 -0700 Subject: [PATCH 23/24] #4911: fixed the TestEnvironmentPreserved integration test, it needs to shut down horizon during the test --- services/horizon/internal/integration/parameters_test.go | 2 ++ services/horizon/internal/test/integration/integration.go | 8 ++++---- 2 files changed, 6 insertions(+), 4 deletions(-) diff --git a/services/horizon/internal/integration/parameters_test.go b/services/horizon/internal/integration/parameters_test.go index 91e16ce186..bcba3164cd 100644 --- a/services/horizon/internal/integration/parameters_test.go +++ b/services/horizon/internal/integration/parameters_test.go @@ -113,6 +113,8 @@ func TestEnvironmentPreserved(t *testing.T) { envValue := os.Getenv("STELLAR_CORE_URL") assert.Equal(t, StellarCoreURL, envValue) + test.Shutdown() + envValue = os.Getenv("STELLAR_CORE_URL") assert.Equal(t, "original value", envValue) } diff --git a/services/horizon/internal/test/integration/integration.go b/services/horizon/internal/test/integration/integration.go index ca99fa4c06..9884470d70 100644 --- a/services/horizon/internal/test/integration/integration.go +++ b/services/horizon/internal/test/integration/integration.go @@ -359,10 +359,6 @@ func (i *Test) StartHorizon(startIngestProcess bool) error { }) var err error - if err = i.initializeEnvironmentVariables(); err != nil { - return err - } - // To facilitate custom runs of Horizon, we merge a default set of // parameters with the tester-supplied ones (if any). mergedWebArgs := MergeMaps(i.getDefaultWebArgs(), i.config.HorizonWebParameters) @@ -374,6 +370,10 @@ func (i *Test) StartHorizon(startIngestProcess bool) error { return err } + if err = i.initializeEnvironmentVariables(); err != nil { + return err + } + // setup Horizon web process webArgs := mapToFlags(mergedWebArgs) i.t.Log("Horizon command line webArgs:", webArgs) From dee1424d22db7c6c477662101183865631c53475 Mon Sep 17 00:00:00 2001 From: Shawn Reuland Date: Wed, 17 Jul 2024 20:49:06 -0700 Subject: [PATCH 24/24] #4911: revert part of the prior change, need to keep the time.Sleep in some parameters_test cases due to the ingest bug noted in comments --- .../horizon/internal/integration/parameters_test.go | 10 ++++++++++ 1 file changed, 10 insertions(+) diff --git a/services/horizon/internal/integration/parameters_test.go b/services/horizon/internal/integration/parameters_test.go index bcba3164cd..c7e0d0c75b 100644 --- a/services/horizon/internal/integration/parameters_test.go +++ b/services/horizon/internal/integration/parameters_test.go @@ -12,6 +12,7 @@ import ( "strings" "sync" "testing" + "time" "github.com/spf13/cobra" @@ -157,6 +158,9 @@ func TestInvalidNetworkParameters(t *testing.T) { testConfig.HorizonIngestParameters = localParams test := integration.NewTest(t, *testConfig) err := test.StartHorizon(true) + // Adding sleep as a workaround for the race condition in the ingestion system. + // https://github.com/stellar/go/issues/5005 + time.Sleep(2 * time.Second) assert.Equal(t, testCase.errMsg, err.Error()) }) } @@ -197,6 +201,9 @@ func TestNetworkParameter(t *testing.T) { testConfig.HorizonIngestParameters = localParams test := integration.NewTest(t, *testConfig) err := test.StartHorizon(true) + // Adding sleep as a workaround for the race condition in the ingestion system. 
+ // https://github.com/stellar/go/issues/5005 + time.Sleep(2 * time.Second) assert.NoError(t, err) assert.Equal(t, test.GetHorizonIngestConfig().HistoryArchiveURLs, tt.historyArchiveURLs) assert.Equal(t, test.GetHorizonIngestConfig().NetworkPassphrase, tt.networkPassphrase) @@ -235,6 +242,9 @@ func TestNetworkEnvironmentVariable(t *testing.T) { testConfig.HorizonEnvironment = map[string]string{"NETWORK": networkValue} test := integration.NewTest(t, *testConfig) err := test.StartHorizon(true) + // Adding sleep as a workaround for the race condition in the ingestion system. + // https://github.com/stellar/go/issues/5005 + time.Sleep(2 * time.Second) assert.NoError(t, err) }) }
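Taken together, patches 21 through 24 leave the integration harness with two start-up modes. As a closing illustration, here is a hypothetical web-only test, a sketch rather than code from this PR, assuming it sits in the same package as db_test.go with the same imports; the names and flow restate what the patches above introduce.

// TestWebOnlySketch is a hypothetical example of the web-only mode of the
// refactored harness; it is not part of this patch series.
func TestWebOnlySketch(t *testing.T) {
	// Skip the default start so horizon can be brought up in web-only mode,
	// and skip the core container since no ingestion will run.
	test := integration.NewTest(t, integration.Config{
		SkipHorizonStart:          true,
		SkipCoreContainerCreation: true,
	})

	// false = start only the client-facing web process; StartHorizon then
	// migrates the empty test DB itself, since no ingest process runs with
	// --apply-migrations.
	require.NoError(t, test.StartHorizon(false))
	test.WaitForHorizonWeb()

	// The per-test database DSN is exposed for commands such as
	// `db reingest range --db-url ...`.
	t.Log("test db:", test.GetTestDB().DSN)

	// Requests go through the usual client; Shutdown still runs implicitly
	// when the test finishes.
	_, err := test.Client().Root()
	require.NoError(t, err)
}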