diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 0fea7eaeb0..5d45d9b70a 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -94,7 +94,7 @@ jobs:
matrix:
go-version: [1.22.x, 1.x]
platform: [ubuntu-latest]
- module: [artemis, azurite, cassandra, chroma, clickhouse, cockroachdb, compose, consul, couchbase, databend, dolt, dynamodb, elasticsearch, etcd, gcloud, grafana-lgtm, inbucket, influxdb, k3s, k6, kafka, localstack, mariadb, milvus, minio, mockserver, mongodb, mssql, mysql, nats, neo4j, ollama, openfga, openldap, opensearch, postgres, pulsar, qdrant, rabbitmq, redis, redpanda, registry, surrealdb, valkey, vault, vearch, weaviate]
+ module: [artemis, azurite, cassandra, chroma, clickhouse, cockroachdb, compose, consul, couchbase, databend, dolt, dynamodb, elasticsearch, etcd, gcloud, grafana-lgtm, inbucket, influxdb, k3s, k6, kafka, localstack, mariadb, milvus, minio, mockserver, mongodb, mssql, mysql, nats, neo4j, ollama, openfga, openldap, opensearch, postgres, pulsar, qdrant, rabbitmq, redis, redpanda, registry, surrealdb, valkey, vault, vearch, weaviate, yugabytedb]
uses: ./.github/workflows/ci-test-go.yml
with:
go-version: ${{ matrix.go-version }}
diff --git a/.vscode/.testcontainers-go.code-workspace b/.vscode/.testcontainers-go.code-workspace
index 68a2751a5d..73e2699036 100644
--- a/.vscode/.testcontainers-go.code-workspace
+++ b/.vscode/.testcontainers-go.code-workspace
@@ -201,6 +201,10 @@
"name": "module / weaviate",
"path": "../modules/weaviate"
},
+ {
+ "name": "module / yugabytedb",
+ "path": "../modules/yugabytedb"
+ },
{
"name": "modulegen",
"path": "../modulegen"
diff --git a/container.go b/container.go
index 1e95fb09d4..d114a5988a 100644
--- a/container.go
+++ b/container.go
@@ -37,17 +37,17 @@ type DeprecatedContainer interface {
// Container allows getting info about and controlling a single container instance
type Container interface {
- GetContainerID() string // get the container id from the provider
- Endpoint(context.Context, string) (string, error) // get proto://ip:port string for the lowest exposed port
- PortEndpoint(context.Context, nat.Port, string) (string, error) // get proto://ip:port string for the given exposed port
- Host(context.Context) (string, error) // get host where the container port is exposed
- Inspect(context.Context) (*types.ContainerJSON, error) // get container info
- MappedPort(context.Context, nat.Port) (nat.Port, error) // get externally mapped port for a container port
- Ports(context.Context) (nat.PortMap, error) // Deprecated: Use c.Inspect(ctx).NetworkSettings.Ports instead
- SessionID() string // get session id
- IsRunning() bool // IsRunning returns true if the container is running, false otherwise.
- Start(context.Context) error // start the container
- Stop(context.Context, *time.Duration) error // stop the container
+ GetContainerID() string // get the container id from the provider
+ Endpoint(context.Context, string) (string, error) // get proto://ip:port string for the lowest exposed port
+ PortEndpoint(ctx context.Context, port nat.Port, proto string) (string, error) // get proto://ip:port string for the given exposed port
+ Host(context.Context) (string, error) // get host where the container port is exposed
+ Inspect(context.Context) (*types.ContainerJSON, error) // get container info
+ MappedPort(context.Context, nat.Port) (nat.Port, error) // get externally mapped port for a container port
+ Ports(context.Context) (nat.PortMap, error) // Deprecated: Use c.Inspect(ctx).NetworkSettings.Ports instead
+ SessionID() string // get session id
+ IsRunning() bool // IsRunning returns true if the container is running, false otherwise.
+ Start(context.Context) error // start the container
+ Stop(context.Context, *time.Duration) error // stop the container
// Terminate stops and removes the container and its image if it was built and not flagged as kept.
Terminate(ctx context.Context) error
diff --git a/container_ignore_test.go b/container_ignore_test.go
index ca89db4d89..505b9edd6d 100644
--- a/container_ignore_test.go
+++ b/container_ignore_test.go
@@ -6,6 +6,7 @@ import (
"testing"
"github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
)
func TestParseDockerIgnore(t *testing.T) {
@@ -37,7 +38,7 @@ func TestParseDockerIgnore(t *testing.T) {
for _, testCase := range testCases {
exists, excluded, err := parseDockerIgnore(testCase.filePath)
assert.Equal(t, testCase.exists, exists)
- assert.Equal(t, testCase.expectedErr, err)
+ require.ErrorIs(t, err, testCase.expectedErr)
assert.Equal(t, testCase.expectedExcluded, excluded)
}
}
diff --git a/container_test.go b/container_test.go
index 0cca97e6f5..562eb038a4 100644
--- a/container_test.go
+++ b/container_test.go
@@ -320,7 +320,7 @@ func TestCustomLabelsImage(t *testing.T) {
ctr, err := testcontainers.GenericContainer(ctx, req)
require.NoError(t, err)
- t.Cleanup(func() { assert.NoError(t, ctr.Terminate(ctx)) })
+ t.Cleanup(func() { require.NoError(t, ctr.Terminate(ctx)) })
ctrJSON, err := ctr.Inspect(ctx)
require.NoError(t, err)
diff --git a/docker.go b/docker.go
index 395dc169ce..488cbe9f67 100644
--- a/docker.go
+++ b/docker.go
@@ -182,7 +182,7 @@ func (c *DockerContainer) Inspect(ctx context.Context) (*types.ContainerJSON, er
func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Port, error) {
inspect, err := c.Inspect(ctx)
if err != nil {
- return "", err
+ return "", fmt.Errorf("inspect: %w", err)
}
if inspect.ContainerJSONBase.HostConfig.NetworkMode == "host" {
return port, nil
@@ -203,7 +203,7 @@ func (c *DockerContainer) MappedPort(ctx context.Context, port nat.Port) (nat.Po
return nat.NewPort(k.Proto(), p[0].HostPort)
}
- return "", errors.New("port not found")
+ return "", errdefs.NotFound(fmt.Errorf("port %q not found", port))
}
// Deprecated: use c.Inspect(ctx).NetworkSettings.Ports instead.
@@ -979,9 +979,7 @@ func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (st
}
// CreateContainer fulfils a request for a container without starting it
-func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (Container, error) {
- var err error
-
+func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { //nolint:nonamedreturns // Needed for error checking.
// defer the close of the Docker client connection the soonest
defer p.Close()
@@ -1000,22 +998,23 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque
// the reaper does not need to start a reaper for itself
isReaperContainer := strings.HasSuffix(imageName, config.ReaperDefaultImage)
if !p.config.RyukDisabled && !isReaperContainer {
- r, err := reuseOrCreateReaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), core.SessionID(), p)
+ r, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), core.SessionID(), p)
if err != nil {
- return nil, fmt.Errorf("%w: creating reaper failed", err)
+ return nil, fmt.Errorf("reaper: %w", err)
}
- termSignal, err = r.Connect()
+
+ termSignal, err := r.Connect()
if err != nil {
- return nil, fmt.Errorf("%w: connecting to reaper failed", err)
+ return nil, fmt.Errorf("reaper connect: %w", err)
}
- }
- // Cleanup on error, otherwise set termSignal to nil before successful return.
- defer func() {
- if termSignal != nil {
- termSignal <- true
- }
- }()
+ // Cleanup on error.
+ defer func() {
+ if err != nil {
+ termSignal <- true
+ }
+ }()
+ }
if err = req.Validate(); err != nil {
return nil, err
@@ -1081,10 +1080,9 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque
}
if !isReaperContainer {
- // add the labels that the reaper will use to terminate the container to the request
- for k, v := range core.DefaultLabels(core.SessionID()) {
- req.Labels[k] = v
- }
+ // Add the labels that identify this as a testcontainers container and
+ // allow the reaper to terminate it if requested.
+ AddGenericLabels(req.Labels)
}
dockerInput := &container.Config{
@@ -1178,9 +1176,6 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque
return nil, err
}
- // Disable cleanup on success
- termSignal = nil
-
return c, nil
}
@@ -1229,7 +1224,7 @@ func (p *DockerProvider) waitContainerCreation(ctx context.Context, name string)
)
}
-func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest) (Container, error) {
+func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { //nolint:nonamedreturns // Needed for error check.
c, err := p.findContainerByName(ctx, req.Name)
if err != nil {
return nil, err
@@ -1252,14 +1247,22 @@ func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req Contain
var termSignal chan bool
if !p.config.RyukDisabled {
- r, err := reuseOrCreateReaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), sessionID, p)
+ r, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), sessionID, p)
if err != nil {
return nil, fmt.Errorf("reaper: %w", err)
}
- termSignal, err = r.Connect()
+
+ termSignal, err := r.Connect()
if err != nil {
- return nil, fmt.Errorf("%w: connecting to reaper failed", err)
+ return nil, fmt.Errorf("reaper connect: %w", err)
}
+
+ // Cleanup on error.
+ defer func() {
+ if err != nil {
+ termSignal <- true
+ }
+ }()
}
// default hooks include logger hook and pre-create hook
@@ -1427,9 +1430,7 @@ func daemonHost(ctx context.Context, p *DockerProvider) (string, error) {
// Deprecated: use network.New instead
// CreateNetwork returns the object representing a new network identified by its name
-func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (Network, error) {
- var err error
-
+func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (net Network, err error) { //nolint:nonamedreturns // Needed for error check.
// defer the close of the Docker client connection the soonest
defer p.Close()
@@ -1450,31 +1451,30 @@ func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest)
var termSignal chan bool
if !p.config.RyukDisabled {
- r, err := reuseOrCreateReaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), sessionID, p)
+ r, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), sessionID, p)
if err != nil {
- return nil, fmt.Errorf("%w: creating network reaper failed", err)
+ return nil, fmt.Errorf("reaper: %w", err)
}
- termSignal, err = r.Connect()
+
+ termSignal, err := r.Connect()
if err != nil {
- return nil, fmt.Errorf("%w: connecting to network reaper failed", err)
+ return nil, fmt.Errorf("reaper connect: %w", err)
}
- }
- // add the labels that the reaper will use to terminate the network to the request
- for k, v := range core.DefaultLabels(sessionID) {
- req.Labels[k] = v
+ // Cleanup on error.
+ defer func() {
+ if err != nil {
+ termSignal <- true
+ }
+ }()
}
- // Cleanup on error, otherwise set termSignal to nil before successful return.
- defer func() {
- if termSignal != nil {
- termSignal <- true
- }
- }()
+ // add the labels that the reaper will use to terminate the network to the request
+ core.AddDefaultLabels(sessionID, req.Labels)
response, err := p.client.NetworkCreate(ctx, req.Name, nc)
if err != nil {
- return &DockerNetwork{}, err
+ return &DockerNetwork{}, fmt.Errorf("create network: %w", err)
}
n := &DockerNetwork{
@@ -1485,9 +1485,6 @@ func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest)
provider: p,
}
- // Disable cleanup on success
- termSignal = nil
-
return n, nil
}
@@ -1553,7 +1550,7 @@ func containerFromDockerResponse(ctx context.Context, response types.Container)
// populate the raw representation of the container
jsonRaw, err := ctr.inspectRawContainer(ctx)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("inspect raw container: %w", err)
}
// the health status of the container, if any
diff --git a/docker_mounts.go b/docker_mounts.go
index aed3010361..d8af3fae3e 100644
--- a/docker_mounts.go
+++ b/docker_mounts.go
@@ -126,9 +126,7 @@ func mapToDockerMounts(containerMounts ContainerMounts) []mount.Mount {
Labels: make(map[string]string),
}
}
- for k, v := range GenericLabels() {
- containerMount.VolumeOptions.Labels[k] = v
- }
+ AddGenericLabels(containerMount.VolumeOptions.Labels)
}
mounts = append(mounts, containerMount)
diff --git a/docker_test.go b/docker_test.go
index ee60b7e957..6177b7d743 100644
--- a/docker_test.go
+++ b/docker_test.go
@@ -293,7 +293,7 @@ func TestContainerStateAfterTermination(t *testing.T) {
state, err := nginx.State(ctx)
require.Error(t, err, "expected error from container inspect.")
- assert.Nil(t, state, "expected nil container inspect.")
+ require.Nil(t, state, "expected nil container inspect.")
})
t.Run("Nil State after termination if raw as already set", func(t *testing.T) {
@@ -1287,7 +1287,7 @@ func TestContainerInspect_RawInspectIsCleanedOnStop(t *testing.T) {
inspect, err := ctr.Inspect(context.Background())
require.NoError(t, err)
- assert.NotEmpty(t, inspect.ID)
+ require.NotEmpty(t, inspect.ID)
require.NoError(t, ctr.Stop(context.Background(), nil))
}
@@ -1641,7 +1641,7 @@ func TestDockerContainerCopyEmptyFileFromContainer(t *testing.T) {
if err != nil {
t.Fatal(err)
}
- assert.Empty(t, fileContentFromContainer)
+ require.Empty(t, fileContentFromContainer)
}
func TestDockerContainerResources(t *testing.T) {
diff --git a/docs/modules/yugabytedb.md b/docs/modules/yugabytedb.md
new file mode 100644
index 0000000000..b645d678c9
--- /dev/null
+++ b/docs/modules/yugabytedb.md
@@ -0,0 +1,94 @@
+# YugabyteDB
+
+Not available until the next release of testcontainers-go :material-tag: main
+
+## Introduction
+
+The Testcontainers module for yugabyteDB.
+
+## Adding this module to your project dependencies
+
+Please run the following command to add the yugabyteDB module to your Go dependencies:
+
+```
+go get github.com/testcontainers/testcontainers-go/modules/yugabytedb
+```
+
+## Usage example
+
+
+[Creating a yugabyteDB container](../../modules/yugabytedb/examples_test.go) inside_block:ExampleRun
+
+
+## Module Reference
+
+### Run function
+
+The yugabyteDB module exposes one entrypoint function to create the yugabyteDB container, and this function receives three parameters:
+
+```golang
+func Run(
+ ctx context.Context,
+ img string,
+ opts ...testcontainers.ContainerCustomizer,
+) (*Container, error)
+```
+
+- `context.Context`, the Go context.
+- `string`, the Docker image to use.
+- `testcontainers.ContainerCustomizer`, a variadic argument for passing options.
+
+### Container Options
+
+When starting the yugabyteDB container, you can pass options in a variadic way to configure it.
+
+#### Image
+
+If you need to set a different yugabyteDB Docker image, you can set a valid Docker image as the second argument in the `Run` function.
+E.g. `Run(context.Background(), "yugabytedb/yugabyte")`.
+
+{% include "../features/common_functional_options.md" %}
+
+#### Initial Database
+
+By default the yugabyteDB container will start with a database named `yugabyte` and the default credentials `yugabyte` and `yugabyte`.
+
+If you need to set a different database, and its credentials, you can use the `WithDatabaseName(dbName string)`, `WithDatabaseUser(dbUser string)` and `WithDatabasePassword(dbPassword string)` options.
+
+#### Initial Cluster Configuration
+
+By default the yugabyteDB container will start with a cluster keyspace named `yugabyte` and the default credentials `yugabyte` and `yugabyte`.
+
+If you need to set a different cluster keyspace, and its credentials, you can use the `WithKeyspace(keyspace string)`, `WithUser(user string)` and `WithPassword(password string)` options.
+
+### Container Methods
+
+The yugabyteDB container exposes the following methods:
+
+#### YSQLConnectionString
+
+This method returns the connection string for the yugabyteDB container when using
+the YSQL query language.
+The connection string can then be used to connect to the yugabyteDB container using
+a standard PostgreSQL client.
+
+
+[Create a postgres client using the connection string](../../modules/yugabytedb/examples_test.go) block:ExampleContainer_YSQLConnectionString
+
+
+### Usage examples
+
+#### Usage with YCQL and gocql
+
+To use the YCQL query language, you need to configure the cluster
+with the keyspace, user, and password.
+
+By default, the yugabyteDB container will start with a cluster keyspace named `yugabyte` and the default credentials `yugabyte` and `yugabyte` but you can change it using the `WithKeyspace`, `WithUser` and `WithPassword` options.
+
+In order to get the appropriate host and port to connect to the yugabyteDB container,
+you can use the `Host` and `MappedPort` methods on the Container struct.
+See the examples below:
+
+
+[Create a yugabyteDB client using the cluster configuration](../../modules/yugabytedb/yugabytedb_test.go) block:TestYugabyteDB_YCQL
+
\ No newline at end of file
diff --git a/from_dockerfile_test.go b/from_dockerfile_test.go
index a800877822..3d4c758e37 100644
--- a/from_dockerfile_test.go
+++ b/from_dockerfile_test.go
@@ -12,7 +12,6 @@ import (
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/image"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
@@ -38,7 +37,7 @@ func TestBuildImageFromDockerfile(t *testing.T) {
// }
})
require.NoError(t, err)
- assert.Equal(t, "test-repo:test-tag", tag)
+ require.Equal(t, "test-repo:test-tag", tag)
_, _, err = cli.ImageInspectWithRaw(ctx, tag)
require.NoError(t, err)
@@ -73,7 +72,7 @@ func TestBuildImageFromDockerfile_NoRepo(t *testing.T) {
},
})
require.NoError(t, err)
- assert.True(t, strings.HasPrefix(tag, "test-repo:"))
+ require.True(t, strings.HasPrefix(tag, "test-repo:"))
_, _, err = cli.ImageInspectWithRaw(ctx, tag)
require.NoError(t, err)
@@ -129,7 +128,7 @@ func TestBuildImageFromDockerfile_NoTag(t *testing.T) {
},
})
require.NoError(t, err)
- assert.True(t, strings.HasSuffix(tag, ":test-tag"))
+ require.True(t, strings.HasSuffix(tag, ":test-tag"))
_, _, err = cli.ImageInspectWithRaw(ctx, tag)
require.NoError(t, err)
diff --git a/generic.go b/generic.go
index 9222ab584c..69b26ce7df 100644
--- a/generic.go
+++ b/generic.go
@@ -101,7 +101,17 @@ type GenericProvider interface {
ImageProvider
}
-// GenericLabels returns a map of labels that can be used to identify containers created by this library
+// GenericLabels returns a map of labels that can be used to identify resources
+// created by this library. This includes the standard LabelSessionID if the
+// reaper is enabled, otherwise this is excluded to prevent resources being
+// incorrectly reaped.
func GenericLabels() map[string]string {
return core.DefaultLabels(core.SessionID())
}
+
+// AddGenericLabels adds the generic labels to target.
+func AddGenericLabels(target map[string]string) {
+ for k, v := range GenericLabels() {
+ target[k] = v
+ }
+}
diff --git a/internal/config/config.go b/internal/config/config.go
index 4dcf766d58..dbef0c09dd 100644
--- a/internal/config/config.go
+++ b/internal/config/config.go
@@ -11,7 +11,7 @@ import (
"github.com/magiconair/properties"
)
-const ReaperDefaultImage = "testcontainers/ryuk:0.9.0"
+const ReaperDefaultImage = "testcontainers/ryuk:0.10.2"
var (
tcConfig Config
diff --git a/internal/config/config_test.go b/internal/config/config_test.go
index 341b62c550..70d8df7df2 100644
--- a/internal/config/config_test.go
+++ b/internal/config/config_test.go
@@ -47,7 +47,7 @@ func TestReadConfig(t *testing.T) {
Host: "", // docker socket is empty at the properties file
}
- assert.Equal(t, expected, config)
+ require.Equal(t, expected, config)
t.Setenv("TESTCONTAINERS_RYUK_DISABLED", "false")
diff --git a/internal/core/docker_host_test.go b/internal/core/docker_host_test.go
index 4c655020b3..908aa7ffbf 100644
--- a/internal/core/docker_host_test.go
+++ b/internal/core/docker_host_test.go
@@ -73,7 +73,7 @@ func TestExtractDockerHost(t *testing.T) {
host := MustExtractDockerHost(context.Background())
- assert.Equal(t, expected, host)
+ require.Equal(t, expected, host)
t.Setenv("DOCKER_HOST", "/path/to/another/docker.sock")
@@ -207,7 +207,7 @@ func TestExtractDockerHost(t *testing.T) {
socket, err := testcontainersHostFromProperties(context.Background())
require.ErrorIs(t, err, ErrTestcontainersHostNotSetInProperties)
- assert.Empty(t, socket)
+ require.Empty(t, socket)
})
t.Run("DOCKER_HOST is set", func(t *testing.T) {
@@ -227,7 +227,7 @@ func TestExtractDockerHost(t *testing.T) {
socket, err := dockerHostFromEnv(context.Background())
require.ErrorIs(t, err, ErrDockerHostNotSet)
- assert.Empty(t, socket)
+ require.Empty(t, socket)
})
t.Run("TESTCONTAINERS_DOCKER_SOCKET_OVERRIDE is set", func(t *testing.T) {
@@ -251,7 +251,7 @@ func TestExtractDockerHost(t *testing.T) {
socket, err := dockerSocketOverridePath()
require.ErrorIs(t, err, ErrDockerSocketOverrideNotSet)
- assert.Empty(t, socket)
+ require.Empty(t, socket)
})
t.Run("Context sets the Docker socket", func(t *testing.T) {
@@ -267,7 +267,7 @@ func TestExtractDockerHost(t *testing.T) {
socket, err := dockerHostFromContext(context.WithValue(ctx, DockerHostContextKey, "path-to-docker-sock"))
require.Error(t, err)
- assert.Empty(t, socket)
+ require.Empty(t, socket)
})
t.Run("Context sets a malformed schema for the Docker socket", func(t *testing.T) {
@@ -275,7 +275,7 @@ func TestExtractDockerHost(t *testing.T) {
socket, err := dockerHostFromContext(context.WithValue(ctx, DockerHostContextKey, "http://example.com/docker.sock"))
require.ErrorIs(t, err, ErrNoUnixSchema)
- assert.Empty(t, socket)
+ require.Empty(t, socket)
})
t.Run("Docker socket exists", func(t *testing.T) {
@@ -304,7 +304,7 @@ func TestExtractDockerHost(t *testing.T) {
socket, err := dockerHostFromProperties(context.Background())
require.ErrorIs(t, err, ErrDockerSocketNotSetInProperties)
- assert.Empty(t, socket)
+ require.Empty(t, socket)
})
t.Run("Docker socket does not exist", func(t *testing.T) {
@@ -312,7 +312,7 @@ func TestExtractDockerHost(t *testing.T) {
socket, err := dockerSocketPath(context.Background())
require.ErrorIs(t, err, ErrSocketNotFoundInPath)
- assert.Empty(t, socket)
+ require.Empty(t, socket)
})
t.Run("extract-from-docker-context/not-found", func(tt *testing.T) {
diff --git a/internal/core/docker_rootless_test.go b/internal/core/docker_rootless_test.go
index 7897f35783..687aa24707 100644
--- a/internal/core/docker_rootless_test.go
+++ b/internal/core/docker_rootless_test.go
@@ -70,7 +70,7 @@ func TestRootlessDockerSocketPathNotSupportedOnWindows(t *testing.T) {
t.Setenv("GOOS", "windows")
socketPath, err := rootlessDockerSocketPath(context.Background())
require.ErrorIs(t, err, ErrRootlessDockerNotSupportedWindows)
- assert.Empty(t, socketPath)
+ require.Empty(t, socketPath)
}
func TestRootlessDockerSocketPath(t *testing.T) {
@@ -179,7 +179,7 @@ func TestRootlessDockerSocketPath(t *testing.T) {
socketPath, err := rootlessDockerSocketPath(context.Background())
require.ErrorIs(t, err, ErrRootlessDockerNotFoundXDGRuntimeDir)
- assert.Empty(t, socketPath)
+ require.Empty(t, socketPath)
})
}
diff --git a/internal/core/images_test.go b/internal/core/images_test.go
index 760a5cb857..509a117c80 100644
--- a/internal/core/images_test.go
+++ b/internal/core/images_test.go
@@ -67,7 +67,7 @@ func TestExtractImagesFromDockerfile(t *testing.T) {
images, err := ExtractImagesFromDockerfile(tt.dockerfile, tt.buildArgs)
if tt.expectedError {
require.Error(t, err)
- assert.Empty(t, images)
+ require.Empty(t, images)
} else {
require.NoError(t, err)
assert.Equal(t, tt.expected, images)
diff --git a/internal/core/labels.go b/internal/core/labels.go
index b5da2fb29d..0814924234 100644
--- a/internal/core/labels.go
+++ b/internal/core/labels.go
@@ -6,23 +6,53 @@ import (
"strings"
"github.com/testcontainers/testcontainers-go/internal"
+ "github.com/testcontainers/testcontainers-go/internal/config"
)
const (
- LabelBase = "org.testcontainers"
- LabelLang = LabelBase + ".lang"
- LabelReaper = LabelBase + ".reaper"
- LabelRyuk = LabelBase + ".ryuk"
+ // LabelBase is the base label for all testcontainers labels.
+ LabelBase = "org.testcontainers"
+
+ // LabelLang specifies the language which created the test container.
+ LabelLang = LabelBase + ".lang"
+
+ // LabelReaper identifies the container as a reaper.
+ LabelReaper = LabelBase + ".reaper"
+
+ // LabelRyuk identifies the container as a ryuk.
+ LabelRyuk = LabelBase + ".ryuk"
+
+ // LabelSessionID specifies the session ID of the container.
LabelSessionID = LabelBase + ".sessionId"
- LabelVersion = LabelBase + ".version"
+
+ // LabelVersion specifies the version of testcontainers which created the container.
+ LabelVersion = LabelBase + ".version"
+
+ // LabelReap specifies the container should be reaped by the reaper.
+ LabelReap = LabelBase + ".reap"
)
+// DefaultLabels returns the standard set of labels which
+// includes LabelSessionID if the reaper is enabled.
func DefaultLabels(sessionID string) map[string]string {
- return map[string]string{
+ labels := map[string]string{
LabelBase: "true",
LabelLang: "go",
- LabelSessionID: sessionID,
LabelVersion: internal.Version,
+ LabelSessionID: sessionID,
+ }
+
+ if !config.Read().RyukDisabled {
+ labels[LabelReap] = "true"
+ }
+
+ return labels
+}
+
+// AddDefaultLabels adds the default labels for sessionID to target.
+func AddDefaultLabels(sessionID string, target map[string]string) {
+ for k, v := range DefaultLabels(sessionID) {
+ target[k] = v
}
}
diff --git a/lifecycle.go b/lifecycle.go
index 9270a790b2..d00de3762d 100644
--- a/lifecycle.go
+++ b/lifecycle.go
@@ -33,7 +33,7 @@ type ContainerRequestHook func(ctx context.Context, req ContainerRequest) error
// - Terminating
// - Terminated
// For that, it will receive a Container, modify it and return an error if needed.
-type ContainerHook func(ctx context.Context, container Container) error
+type ContainerHook func(ctx context.Context, ctr Container) error
// ContainerLifecycleHooks is a struct that contains all the hooks that can be used
// to modify the container lifecycle. All the container lifecycle hooks except the PreCreates hooks
diff --git a/lifecycle_test.go b/lifecycle_test.go
index 04da1df835..e7e156b422 100644
--- a/lifecycle_test.go
+++ b/lifecycle_test.go
@@ -281,7 +281,7 @@ func TestPreCreateModifierHook(t *testing.T) {
// assertions
- assert.Empty(
+ require.Empty(
t,
inputNetworkingConfig.EndpointsConfig[networkName].Aliases,
"Networking config's aliases should be empty",
diff --git a/mkdocs.yml b/mkdocs.yml
index 2afab905f7..129b831968 100644
--- a/mkdocs.yml
+++ b/mkdocs.yml
@@ -112,6 +112,7 @@ nav:
- modules/vault.md
- modules/vearch.md
- modules/weaviate.md
+ - modules/yugabytedb.md
- Examples:
- examples/index.md
- examples/nginx.md
diff --git a/modulegen/_template/examples_test.go.tmpl b/modulegen/_template/examples_test.go.tmpl
index f02ab36021..ca55c61e44 100644
--- a/modulegen/_template/examples_test.go.tmpl
+++ b/modulegen/_template/examples_test.go.tmpl
@@ -1,15 +1,15 @@
-{{ $entrypoint := Entrypoint }}{{ $image := Image }}{{ $lower := ToLower }}{{ $title := Title }}package {{ $lower }}_test
+{{ $entrypoint := Entrypoint }}{{ $image := Image }}{{ $lower := ToLower }}package {{ $lower }}_test
import (
"context"
"fmt"
"log"
+ "github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/{{ ParentDir }}/{{ $lower }}"
)
func Example{{ $entrypoint }}() {
- // run{{ $title }}Container {
ctx := context.Background()
{{ $lower }}Container, err := {{ $lower }}.{{ $entrypoint }}(ctx, "{{ $image }}")
diff --git a/modulegen/_template/module.go.tmpl b/modulegen/_template/module.go.tmpl
index 31e50981d0..585e853fba 100644
--- a/modulegen/_template/module.go.tmpl
+++ b/modulegen/_template/module.go.tmpl
@@ -7,13 +7,13 @@ import (
"github.com/testcontainers/testcontainers-go"
)
-// {{ $containerName }} represents the {{ $title }} container type used in the module
-type {{ $containerName }} struct {
+// Container represents the {{ $title }} container type used in the module
+type Container struct {
testcontainers.Container
}
// {{ $entrypoint }} creates an instance of the {{ $title }} container type
-func {{ $entrypoint }}(ctx context.Context, img string, opts ...testcontainers.ContainerCustomizer) (*{{ $containerName }}, error) {
+func {{ $entrypoint }}(ctx context.Context, img string, opts ...testcontainers.ContainerCustomizer) (*Container, error) {
req := testcontainers.ContainerRequest{
Image: img,
}
@@ -30,9 +30,9 @@ func {{ $entrypoint }}(ctx context.Context, img string, opts ...testcontainers.C
}
container, err := testcontainers.GenericContainer(ctx, genericContainerReq)
- var c *{{ $containerName }}
+ var c *Container
if container != nil {
- c = &{{ $containerName }}{Container: container}
+ c = &Container{Container: container}
}
if err != nil {
diff --git a/modulegen/_template/module.md.tmpl b/modulegen/_template/module.md.tmpl
index ac29fb3337..91945bd254 100644
--- a/modulegen/_template/module.md.tmpl
+++ b/modulegen/_template/module.md.tmpl
@@ -1,4 +1,4 @@
-{{ $lower := ToLower }}{{ $title := Title }}# {{ $title }}
+{{ $entrypoint := Entrypoint }}{{ $lower := ToLower }}{{ $title := Title }}# {{ $title }}
Not available until the next release of testcontainers-go :material-tag: main
@@ -17,7 +17,7 @@ go get github.com/testcontainers/testcontainers-go/{{ ParentDir }}/{{ $lower }}
## Usage example
-[Creating a {{ $title }} container](../../{{ ParentDir }}/{{ $lower }}/examples_test.go) inside_block:run{{ $title }}Container
+[Creating a {{ $title }} container](../../{{ ParentDir }}/{{ $lower }}/examples_test.go) inside_block:Example{{ $entrypoint }}
## Module Reference
diff --git a/modulegen/_template/module_test.go.tmpl b/modulegen/_template/module_test.go.tmpl
index 351ba5c8d5..1850e568c9 100644
--- a/modulegen/_template/module_test.go.tmpl
+++ b/modulegen/_template/module_test.go.tmpl
@@ -6,6 +6,7 @@ import (
"github.com/stretchr/testify/require"
+ "github.com/testcontainers/testcontainers-go"
"github.com/testcontainers/testcontainers-go/{{ ParentDir }}/{{ $lower }}"
)
diff --git a/modulegen/internal/context/types.go b/modulegen/internal/context/types.go
index 0792c249df..61d0e6217e 100644
--- a/modulegen/internal/context/types.go
+++ b/modulegen/internal/context/types.go
@@ -4,8 +4,6 @@ import (
"fmt"
"regexp"
"strings"
- "unicode"
- "unicode/utf8"
"golang.org/x/text/cases"
"golang.org/x/text/language"
@@ -22,16 +20,7 @@ type TestcontainersModule struct {
// ContainerName returns the name of the container, which is the lower-cased title of the example
// If the title is set, it will be used instead of the name
func (m *TestcontainersModule) ContainerName() string {
- name := m.Lower()
-
- if m.IsModule {
- name = m.Title()
- } else if m.TitleName != "" {
- r, n := utf8.DecodeRuneInString(m.TitleName)
- name = string(unicode.ToLower(r)) + m.TitleName[n:]
- }
-
- return name + "Container"
+ return "Container"
}
// Entrypoint returns the name of the entrypoint function, which is the lower-cased title of the example
diff --git a/modulegen/main_test.go b/modulegen/main_test.go
index 322cb25920..ad9a946236 100644
--- a/modulegen/main_test.go
+++ b/modulegen/main_test.go
@@ -17,11 +17,10 @@ import (
func TestModule(t *testing.T) {
tests := []struct {
- name string
- module context.TestcontainersModule
- expectedContainerName string
- expectedEntrypoint string
- expectedTitle string
+ name string
+ module context.TestcontainersModule
+ expectedEntrypoint string
+ expectedTitle string
}{
{
name: "Module with title",
@@ -31,9 +30,8 @@ func TestModule(t *testing.T) {
Image: "mongodb:latest",
TitleName: "MongoDB",
},
- expectedContainerName: "MongoDBContainer",
- expectedEntrypoint: "Run",
- expectedTitle: "MongoDB",
+ expectedEntrypoint: "Run",
+ expectedTitle: "MongoDB",
},
{
name: "Module without title",
@@ -42,9 +40,8 @@ func TestModule(t *testing.T) {
IsModule: true,
Image: "mongodb:latest",
},
- expectedContainerName: "MongodbContainer",
- expectedEntrypoint: "Run",
- expectedTitle: "Mongodb",
+ expectedEntrypoint: "Run",
+ expectedTitle: "Mongodb",
},
{
name: "Example with title",
@@ -54,9 +51,8 @@ func TestModule(t *testing.T) {
Image: "mongodb:latest",
TitleName: "MongoDB",
},
- expectedContainerName: "mongoDBContainer",
- expectedEntrypoint: "run",
- expectedTitle: "MongoDB",
+ expectedEntrypoint: "run",
+ expectedTitle: "MongoDB",
},
{
name: "Example without title",
@@ -65,9 +61,9 @@ func TestModule(t *testing.T) {
IsModule: false,
Image: "mongodb:latest",
},
- expectedContainerName: "mongodbContainer",
- expectedEntrypoint: "run",
- expectedTitle: "Mongodb",
+
+ expectedEntrypoint: "run",
+ expectedTitle: "Mongodb",
},
}
@@ -77,7 +73,7 @@ func TestModule(t *testing.T) {
assert.Equal(t, "mongodb", module.Lower())
assert.Equal(t, test.expectedTitle, module.Title())
- assert.Equal(t, test.expectedContainerName, module.ContainerName())
+ assert.Equal(t, "Container", module.ContainerName())
assert.Equal(t, test.expectedEntrypoint, module.Entrypoint())
})
}
@@ -148,7 +144,11 @@ func TestModule_Validate(outer *testing.T) {
for _, test := range tests {
outer.Run(test.name, func(t *testing.T) {
- assert.Equal(t, test.expectedErr, test.module.Validate())
+ if test.expectedErr != nil {
+ require.EqualError(t, test.module.Validate(), test.expectedErr.Error())
+ } else {
+ require.NoError(t, test.module.Validate())
+ }
})
}
}
@@ -277,7 +277,7 @@ func TestGenerate(t *testing.T) {
moduleDirFileInfo, err := os.Stat(moduleDirPath)
require.NoError(t, err) // error nil implies the file exist
- assert.True(t, moduleDirFileInfo.IsDir())
+ require.True(t, moduleDirFileInfo.IsDir())
moduleDocFile := filepath.Join(examplesDocTmp, moduleNameLower+".md")
_, err = os.Stat(moduleDocFile)
@@ -333,7 +333,7 @@ func TestGenerateModule(t *testing.T) {
moduleDirFileInfo, err := os.Stat(moduleDirPath)
require.NoError(t, err) // error nil implies the file exist
- assert.True(t, moduleDirFileInfo.IsDir())
+ require.True(t, moduleDirFileInfo.IsDir())
moduleDocFile := filepath.Join(modulesDocTmp, moduleNameLower+".md")
_, err = os.Stat(moduleDocFile)
@@ -362,6 +362,7 @@ func assertModuleDocContent(t *testing.T, module context.TestcontainersModule, m
lower := module.Lower()
title := module.Title()
+ entrypoint := module.Entrypoint()
data := sanitiseContent(content)
assert.Equal(t, "# "+title, data[0])
@@ -372,7 +373,7 @@ func assertModuleDocContent(t *testing.T, module context.TestcontainersModule, m
assert.Equal(t, "Please run the following command to add the "+title+" module to your Go dependencies:", data[10])
assert.Equal(t, "go get github.com/testcontainers/testcontainers-go/"+module.ParentDir()+"/"+lower, data[13])
assert.Equal(t, "", data[18])
- assert.Equal(t, "[Creating a "+title+" container](../../"+module.ParentDir()+"/"+lower+"/examples_test.go) inside_block:run"+title+"Container", data[19])
+ assert.Equal(t, "[Creating a "+title+" container](../../"+module.ParentDir()+"/"+lower+"/examples_test.go) inside_block:Example"+entrypoint, data[19])
assert.Equal(t, "", data[20])
assert.Equal(t, "The "+title+" module exposes one entrypoint function to create the "+title+" container, and this function receives three parameters:", data[31])
assert.True(t, strings.HasSuffix(data[34], "(*"+title+"Container, error)"))
@@ -387,13 +388,12 @@ func assertExamplesTestContent(t *testing.T, module context.TestcontainersModule
lower := module.Lower()
entrypoint := module.Entrypoint()
- title := module.Title()
data := sanitiseContent(content)
assert.Equal(t, "package "+lower+"_test", data[0])
- assert.Equal(t, "\t\"github.com/testcontainers/testcontainers-go/modules/"+lower+"\"", data[7])
- assert.Equal(t, "func Example"+entrypoint+"() {", data[10])
- assert.Equal(t, "\t// run"+title+"Container {", data[11])
+ assert.Equal(t, "\t\"github.com/testcontainers/testcontainers-go\"", data[7])
+ assert.Equal(t, "\t\"github.com/testcontainers/testcontainers-go/modules/"+lower+"\"", data[8])
+ assert.Equal(t, "func Example"+entrypoint+"() {", data[11])
assert.Equal(t, "\t"+lower+"Container, err := "+lower+"."+entrypoint+"(ctx, \""+module.Image+"\")", data[14])
assert.Equal(t, "\tfmt.Println(state.Running)", data[32])
assert.Equal(t, "\t// Output:", data[34])
@@ -407,8 +407,8 @@ func assertModuleTestContent(t *testing.T, module context.TestcontainersModule,
data := sanitiseContent(content)
assert.Equal(t, "package "+module.Lower()+"_test", data[0])
- assert.Equal(t, "func Test"+module.Title()+"(t *testing.T) {", data[11])
- assert.Equal(t, "\tctr, err := "+module.Lower()+"."+module.Entrypoint()+"(ctx, \""+module.Image+"\")", data[14])
+ assert.Equal(t, "func Test"+module.Title()+"(t *testing.T) {", data[12])
+ assert.Equal(t, "\tctr, err := "+module.Lower()+"."+module.Entrypoint()+"(ctx, \""+module.Image+"\")", data[15])
}
// assert content module
@@ -423,7 +423,7 @@ func assertModuleContent(t *testing.T, module context.TestcontainersModule, exam
data := sanitiseContent(content)
require.Equal(t, "package "+lower, data[0])
- require.Equal(t, "// "+containerName+" represents the "+exampleName+" container type used in the module", data[9])
+ require.Equal(t, "// Container represents the "+exampleName+" container type used in the module", data[9])
require.Equal(t, "type "+containerName+" struct {", data[10])
require.Equal(t, "// "+entrypoint+" creates an instance of the "+exampleName+" container type", data[14])
require.Equal(t, "func "+entrypoint+"(ctx context.Context, img string, opts ...testcontainers.ContainerCustomizer) (*"+containerName+", error) {", data[15])
diff --git a/modulegen/mkdocs_test.go b/modulegen/mkdocs_test.go
index 5fcf7c93ba..9ca3b3860c 100644
--- a/modulegen/mkdocs_test.go
+++ b/modulegen/mkdocs_test.go
@@ -40,9 +40,9 @@ func TestReadMkDocsConfig(t *testing.T) {
require.NoError(t, err)
require.NotNil(t, config)
- assert.Equal(t, "Testcontainers for Go", config.SiteName)
- assert.Equal(t, "https://github.com/testcontainers/testcontainers-go", config.RepoURL)
- assert.Equal(t, "edit/main/docs/", config.EditURI)
+ require.Equal(t, "Testcontainers for Go", config.SiteName)
+ require.Equal(t, "https://github.com/testcontainers/testcontainers-go", config.RepoURL)
+ require.Equal(t, "edit/main/docs/", config.EditURI)
// theme
theme := config.Theme
@@ -51,9 +51,9 @@ func TestReadMkDocsConfig(t *testing.T) {
// nav bar
nav := config.Nav
assert.Equal(t, "index.md", nav[0].Home)
- assert.NotEmpty(t, nav[2].Features)
- assert.NotEmpty(t, nav[3].Modules)
- assert.NotEmpty(t, nav[4].Examples)
+ require.NotEmpty(t, nav[2].Features)
+ require.NotEmpty(t, nav[3].Modules)
+ require.NotEmpty(t, nav[4].Examples)
}
func TestNavItems(t *testing.T) {
diff --git a/modules/artemis/artemis_test.go b/modules/artemis/artemis_test.go
index 01097463b2..74f150b031 100644
--- a/modules/artemis/artemis_test.go
+++ b/modules/artemis/artemis_test.go
@@ -76,7 +76,7 @@ func TestArtemis(t *testing.T) {
res, err := http.Get(u)
require.NoError(t, err, "failed to access console")
res.Body.Close()
- assert.Equal(t, http.StatusOK, res.StatusCode, "failed to access console")
+ require.Equal(t, http.StatusOK, res.StatusCode, "failed to access console")
if test.user != "" {
assert.Equal(t, test.user, ctr.User(), "unexpected user")
diff --git a/modules/compose/compose.go b/modules/compose/compose.go
index c63eb73bb1..fa02cde077 100644
--- a/modules/compose/compose.go
+++ b/modules/compose/compose.go
@@ -153,23 +153,6 @@ func NewDockerComposeWith(opts ...ComposeStackOption) (*dockerCompose, error) {
return nil, fmt.Errorf("initialize docker client: %w", err)
}
- reaperProvider, err := testcontainers.NewDockerProvider()
- if err != nil {
- return nil, fmt.Errorf("failed to create reaper provider for compose: %w", err)
- }
-
- var composeReaper *testcontainers.Reaper
- if !reaperProvider.Config().Config.RyukDisabled {
- // NewReaper is deprecated: we need to find a way to create the reaper for compose
- // bypassing the deprecation.
- r, err := testcontainers.NewReaper(context.Background(), testcontainers.SessionID(), reaperProvider, "")
- if err != nil {
- return nil, fmt.Errorf("failed to create reaper for compose: %w", err)
- }
-
- composeReaper = r
- }
-
composeAPI := &dockerCompose{
name: composeOptions.Identifier,
configs: composeOptions.Paths,
@@ -182,7 +165,6 @@ func NewDockerComposeWith(opts ...ComposeStackOption) (*dockerCompose, error) {
containers: make(map[string]*testcontainers.DockerContainer),
networks: make(map[string]*testcontainers.DockerNetwork),
sessionID: testcontainers.SessionID(),
- reaper: composeReaper,
}
return composeAPI, nil
diff --git a/modules/compose/compose_api.go b/modules/compose/compose_api.go
index 9f21d09e87..2c852bdaff 100644
--- a/modules/compose/compose_api.go
+++ b/modules/compose/compose_api.go
@@ -2,6 +2,7 @@ package compose
import (
"context"
+ "errors"
"fmt"
"io"
"os"
@@ -228,9 +229,6 @@ type dockerCompose struct {
// sessionID is used to identify the reaper session
sessionID string
-
- // reaper is used to clean up containers after the stack is stopped
- reaper *testcontainers.Reaper
}
func (d *dockerCompose) ServiceContainer(ctx context.Context, svcName string) (*testcontainers.DockerContainer, error) {
@@ -269,12 +267,10 @@ func (d *dockerCompose) Down(ctx context.Context, opts ...StackDownOption) error
return d.composeService.Down(ctx, d.name, options.DownOptions)
}
-func (d *dockerCompose) Up(ctx context.Context, opts ...StackUpOption) error {
+func (d *dockerCompose) Up(ctx context.Context, opts ...StackUpOption) (err error) {
d.lock.Lock()
defer d.lock.Unlock()
- var err error
-
d.project, err = d.compileProject(ctx)
if err != nil {
return err
@@ -329,27 +325,61 @@ func (d *dockerCompose) Up(ctx context.Context, opts ...StackUpOption) error {
return err
}
- if d.reaper != nil {
+ provider, err := testcontainers.NewDockerProvider(testcontainers.WithLogger(d.logger))
+ if err != nil {
+ return fmt.Errorf("new docker provider: %w", err)
+ }
+
+ var termSignals []chan bool
+ var reaper *testcontainers.Reaper
+ if !provider.Config().Config.RyukDisabled {
+ // NewReaper is deprecated: we need to find a way to create the reaper for compose
+ // bypassing the deprecation.
+ reaper, err = testcontainers.NewReaper(ctx, testcontainers.SessionID(), provider, "")
+ if err != nil {
+ return fmt.Errorf("create reaper: %w", err)
+ }
+
+ // Cleanup on error: signal every reaper connection made below (and the initial one if none were made); no-op when Up returns nil.
+ defer func() {
+ if len(termSignals) == 0 {
+ // Need to call Connect at least once to ensure the initial
+ // connection is cleaned up.
+ termSignal, errc := reaper.Connect()
+ if errc != nil {
+ err = errors.Join(err, fmt.Errorf("reaper connect: %w", errc))
+ } else {
+ termSignal <- true
+ }
+ }
+
+ if err == nil {
+ // No need to cleanup.
+ return
+ }
+
+ for _, ts := range termSignals {
+ ts <- true
+ }
+ }()
+
+ // Connect to the reaper and set the termination signal for each network.
for _, n := range d.networks {
- termSignal, err := d.reaper.Connect()
+ termSignal, err := reaper.Connect()
if err != nil {
- return fmt.Errorf("failed to connect to reaper: %w", err)
+ return fmt.Errorf("reaper connect: %w", err)
}
- n.SetTerminationSignal(termSignal)
- // Cleanup on error, otherwise set termSignal to nil before successful return.
- defer func() {
- if termSignal != nil {
- termSignal <- true
- }
- }()
+ n.SetTerminationSignal(termSignal)
+ termSignals = append(termSignals, termSignal)
}
}
errGrpContainers, errGrpCtx := errgroup.WithContext(ctx)
+ // Lookup the containers for each service and connect them
+ // to the reaper if needed.
for _, srv := range d.project.Services {
- // we are going to connect each container to the reaper
srv := srv
errGrpContainers.Go(func() error {
dc, err := d.lookupContainer(errGrpCtx, srv.Name)
@@ -357,19 +387,14 @@ func (d *dockerCompose) Up(ctx context.Context, opts ...StackUpOption) error {
return err
}
- if d.reaper != nil {
- termSignal, err := d.reaper.Connect()
+ if reaper != nil {
+ termSignal, err := reaper.Connect()
if err != nil {
- return fmt.Errorf("failed to connect to reaper: %w", err)
+ return fmt.Errorf("reaper connect: %w", err)
}
- dc.SetTerminationSignal(termSignal)
- // Cleanup on error, otherwise set termSignal to nil before successful return.
- defer func() {
- if termSignal != nil {
- termSignal <- true
- }
- }()
+ dc.SetTerminationSignal(termSignal)
+ termSignals = append(termSignals, termSignal)
}
return nil
@@ -401,7 +426,11 @@ func (d *dockerCompose) Up(ctx context.Context, opts ...StackUpOption) error {
})
}
- return errGrpWait.Wait()
+ if err := errGrpWait.Wait(); err != nil {
+ return fmt.Errorf("wait for services: %w", err)
+ }
+
+ return nil
}
func (d *dockerCompose) WaitForService(s string, strategy wait.Strategy) ComposeStack {
@@ -486,6 +515,9 @@ func (d *dockerCompose) lookupContainer(ctx context.Context, svcName string) (*t
return ctr, nil
}
+// lookupNetworks is used to retrieve the networks that are part of the compose stack.
+//
+// Safe for concurrent calls.
func (d *dockerCompose) lookupNetworks(ctx context.Context) error {
networks, err := d.dockerClient.NetworkList(ctx, dockernetwork.ListOptions{
Filters: filters.NewArgs(
@@ -543,9 +575,7 @@ func (d *dockerCompose) compileProject(ctx context.Context) (*types.Project, err
api.OneoffLabel: "False", // default, will be overridden by `run` command
}
- for k, label := range testcontainers.GenericLabels() {
- s.CustomLabels[k] = label
- }
+ testcontainers.AddGenericLabels(s.CustomLabels)
for i, envFile := range compiledOptions.EnvFiles {
// add a label for each env file, indexed by its position
@@ -562,9 +592,7 @@ func (d *dockerCompose) compileProject(ctx context.Context) (*types.Project, err
api.VersionLabel: api.ComposeVersion,
}
- for k, label := range testcontainers.GenericLabels() {
- n.Labels[k] = label
- }
+ testcontainers.AddGenericLabels(n.Labels)
proj.Networks[key] = n
}
diff --git a/modules/compose/compose_api_test.go b/modules/compose/compose_api_test.go
index 7879dabfa9..e5f30a5257 100644
--- a/modules/compose/compose_api_test.go
+++ b/modules/compose/compose_api_test.go
@@ -48,8 +48,7 @@ func TestDockerComposeAPIStrategyForInvalidService(t *testing.T) {
WaitForService("non-existent-srv-1", wait.NewLogStrategy("started").WithStartupTimeout(10*time.Second).WithOccurrence(1)).
Up(ctx, Wait(true))
cleanup(t, compose)
- require.Error(t, err, "Expected error to be thrown because service with wait strategy is not running")
- require.Equal(t, "no container found for service name non-existent-srv-1", err.Error())
+ require.EqualError(t, err, "wait for services: no container found for service name non-existent-srv-1")
serviceNames := compose.Services()
@@ -73,9 +72,9 @@ func TestDockerComposeAPIWithWaitLogStrategy(t *testing.T) {
serviceNames := compose.Services()
- assert.Len(t, serviceNames, 2)
- assert.Contains(t, serviceNames, "api-nginx")
- assert.Contains(t, serviceNames, "api-mysql")
+ require.Len(t, serviceNames, 2)
+ require.Contains(t, serviceNames, "api-nginx")
+ require.Contains(t, serviceNames, "api-mysql")
}
func TestDockerComposeAPIWithRunServices(t *testing.T) {
@@ -170,9 +169,9 @@ func TestDockerComposeAPI_TestcontainersLabelsArePresent(t *testing.T) {
serviceNames := compose.Services()
- assert.Len(t, serviceNames, 2)
- assert.Contains(t, serviceNames, "api-nginx")
- assert.Contains(t, serviceNames, "api-mysql")
+ require.Len(t, serviceNames, 2)
+ require.Contains(t, serviceNames, "api-nginx")
+ require.Contains(t, serviceNames, "api-mysql")
// all the services in the compose has the Testcontainers Labels
for _, serviceName := range serviceNames {
@@ -213,9 +212,9 @@ func TestDockerComposeAPI_WithReaper(t *testing.T) {
serviceNames := compose.Services()
- assert.Len(t, serviceNames, 2)
- assert.Contains(t, serviceNames, "api-nginx")
- assert.Contains(t, serviceNames, "api-mysql")
+ require.Len(t, serviceNames, 2)
+ require.Contains(t, serviceNames, "api-nginx")
+ require.Contains(t, serviceNames, "api-mysql")
}
func TestDockerComposeAPI_WithoutReaper(t *testing.T) {
@@ -240,9 +239,9 @@ func TestDockerComposeAPI_WithoutReaper(t *testing.T) {
serviceNames := compose.Services()
- assert.Len(t, serviceNames, 2)
- assert.Contains(t, serviceNames, "api-nginx")
- assert.Contains(t, serviceNames, "api-mysql")
+ require.Len(t, serviceNames, 2)
+ require.Contains(t, serviceNames, "api-nginx")
+ require.Contains(t, serviceNames, "api-mysql")
}
func TestDockerComposeAPIWithStopServices(t *testing.T) {
@@ -261,9 +260,9 @@ func TestDockerComposeAPIWithStopServices(t *testing.T) {
serviceNames := compose.Services()
- assert.Len(t, serviceNames, 2)
- assert.Contains(t, serviceNames, "api-nginx")
- assert.Contains(t, serviceNames, "api-mysql")
+ require.Len(t, serviceNames, 2)
+ require.Contains(t, serviceNames, "api-nginx")
+ require.Contains(t, serviceNames, "api-mysql")
// close mysql container in purpose
mysqlContainer, err := compose.ServiceContainer(context.Background(), "api-mysql")
@@ -386,9 +385,9 @@ func TestDockerComposeAPIWithMultipleWaitStrategies(t *testing.T) {
serviceNames := compose.Services()
- assert.Len(t, serviceNames, 2)
- assert.Contains(t, serviceNames, "api-nginx")
- assert.Contains(t, serviceNames, "api-mysql")
+ require.Len(t, serviceNames, 2)
+ require.Contains(t, serviceNames, "api-nginx")
+ require.Contains(t, serviceNames, "api-mysql")
}
func TestDockerComposeAPIWithFailedStrategy(t *testing.T) {
@@ -430,9 +429,9 @@ func TestDockerComposeAPIComplex(t *testing.T) {
serviceNames := compose.Services()
- assert.Len(t, serviceNames, 2)
- assert.Contains(t, serviceNames, "api-nginx")
- assert.Contains(t, serviceNames, "api-mysql")
+ require.Len(t, serviceNames, 2)
+ require.Contains(t, serviceNames, "api-nginx")
+ require.Contains(t, serviceNames, "api-mysql")
}
func TestDockerComposeAPIWithStackReader(t *testing.T) {
@@ -641,7 +640,7 @@ func TestDockerComposeAPIVolumesDeletedOnDown(t *testing.T) {
volumeList, err := compose.dockerClient.VolumeList(ctx, volume.ListOptions{Filters: volumeListFilters})
require.NoError(t, err, "compose.dockerClient.VolumeList()")
- assert.Empty(t, volumeList.Volumes, "Volumes are not cleaned up")
+ require.Empty(t, volumeList.Volumes, "Volumes are not cleaned up")
}
func TestDockerComposeAPIWithBuild(t *testing.T) {
diff --git a/modules/etcd/etcd_test.go b/modules/etcd/etcd_test.go
index 5095ba8429..046e277cac 100644
--- a/modules/etcd/etcd_test.go
+++ b/modules/etcd/etcd_test.go
@@ -23,7 +23,7 @@ func TestRun(t *testing.T) {
c, r, err := ctr.Exec(ctx, []string{"etcdctl", "member", "list"}, tcexec.Multiplexed())
require.NoError(t, err)
- require.Equal(t, 0, c)
+ require.Zero(t, c)
output, err := io.ReadAll(r)
require.NoError(t, err)
diff --git a/modules/localstack/v1/s3_test.go b/modules/localstack/v1/s3_test.go
index 87eba46080..aa4dea378e 100644
--- a/modules/localstack/v1/s3_test.go
+++ b/modules/localstack/v1/s3_test.go
@@ -82,7 +82,7 @@ func TestS3(t *testing.T) {
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
- assert.NotNil(t, outputBucket)
+ require.NotNil(t, outputBucket)
// put object
s3Key1 := "key1"
@@ -96,12 +96,12 @@ func TestS3(t *testing.T) {
ContentDisposition: aws.String("attachment"),
})
require.NoError(t, err)
- assert.NotNil(t, outputObject)
+ require.NotNil(t, outputObject)
t.Run("List Buckets", func(t *testing.T) {
output, err := s3API.ListBuckets(nil)
require.NoError(t, err)
- assert.NotNil(t, output)
+ require.NotNil(t, output)
buckets := output.Buckets
assert.Len(t, buckets, 1)
@@ -113,7 +113,7 @@ func TestS3(t *testing.T) {
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
- assert.NotNil(t, output)
+ require.NotNil(t, output)
objects := output.Contents
diff --git a/modules/localstack/v2/s3_test.go b/modules/localstack/v2/s3_test.go
index 477549fb9c..09380a7665 100644
--- a/modules/localstack/v2/s3_test.go
+++ b/modules/localstack/v2/s3_test.go
@@ -88,7 +88,7 @@ func TestS3(t *testing.T) {
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
- assert.NotNil(t, outputBucket)
+ require.NotNil(t, outputBucket)
// put object
s3Key1 := "key1"
@@ -102,12 +102,12 @@ func TestS3(t *testing.T) {
ContentDisposition: aws.String("attachment"),
})
require.NoError(t, err)
- assert.NotNil(t, outputObject)
+ require.NotNil(t, outputObject)
t.Run("List Buckets", func(t *testing.T) {
output, err := s3Client.ListBuckets(ctx, &s3.ListBucketsInput{})
require.NoError(t, err)
- assert.NotNil(t, output)
+ require.NotNil(t, output)
buckets := output.Buckets
assert.Len(t, buckets, 1)
@@ -119,7 +119,7 @@ func TestS3(t *testing.T) {
Bucket: aws.String(bucketName),
})
require.NoError(t, err)
- assert.NotNil(t, output)
+ require.NotNil(t, output)
objects := output.Contents
diff --git a/modules/redpanda/redpanda_test.go b/modules/redpanda/redpanda_test.go
index 09d391f794..3983b7f8ba 100644
--- a/modules/redpanda/redpanda_test.go
+++ b/modules/redpanda/redpanda_test.go
@@ -44,7 +44,7 @@ func TestRedpanda(t *testing.T) {
kafkaAdmCl := kadm.NewClient(kafkaCl)
metadata, err := kafkaAdmCl.Metadata(ctx)
require.NoError(t, err)
- assert.Len(t, metadata.Brokers, 1)
+ require.Len(t, metadata.Brokers, 1)
// Test Schema Registry API
httpCl := &http.Client{Timeout: 5 * time.Second}
@@ -55,7 +55,7 @@ func TestRedpanda(t *testing.T) {
resp, err := httpCl.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
- assert.Equal(t, http.StatusOK, resp.StatusCode)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
// Test Admin API
// adminAPIAddress {
@@ -67,7 +67,7 @@ func TestRedpanda(t *testing.T) {
resp, err = httpCl.Do(req)
require.NoError(t, err)
defer resp.Body.Close()
- assert.Equal(t, http.StatusOK, resp.StatusCode)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
// Test produce to unknown topic
results := kafkaCl.ProduceSync(ctx, &kgo.Record{Topic: "test", Value: []byte("test message")})
@@ -289,7 +289,7 @@ func TestRedpandaWithOldVersionAndWasm(t *testing.T) {
require.NoError(t, err)
resp, err := httpCl.Do(req)
require.NoError(t, err)
- assert.Equal(t, http.StatusUnauthorized, resp.StatusCode)
+ require.Equal(t, http.StatusUnauthorized, resp.StatusCode)
resp.Body.Close()
// Successful authentication
@@ -357,7 +357,7 @@ func TestRedpandaWithTLS(t *testing.T) {
require.NoError(t, err)
resp, err := httpCl.Do(req)
require.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
resp.Body.Close()
// Test Schema Registry API
@@ -368,7 +368,7 @@ func TestRedpandaWithTLS(t *testing.T) {
require.NoError(t, err)
resp, err = httpCl.Do(req)
require.NoError(t, err)
- assert.Equal(t, http.StatusOK, resp.StatusCode)
+ require.Equal(t, http.StatusOK, resp.StatusCode)
resp.Body.Close()
brokers, err := ctr.KafkaSeedBroker(ctx)
diff --git a/modules/vault/vault_test.go b/modules/vault/vault_test.go
index c55f792c2c..22b87930bb 100644
--- a/modules/vault/vault_test.go
+++ b/modules/vault/vault_test.go
@@ -50,7 +50,7 @@ func TestVault(t *testing.T) {
exec, reader, err := vaultContainer.Exec(ctx, []string{"vault", "kv", "get", "-format=json", "secret/test1"})
// }
require.NoError(t, err)
- assert.Equal(t, 0, exec)
+ require.Zero(t, exec)
bytes, err := io.ReadAll(reader)
require.NoError(t, err)
diff --git a/modules/yugabytedb/Makefile b/modules/yugabytedb/Makefile
new file mode 100644
index 0000000000..a56dee99f2
--- /dev/null
+++ b/modules/yugabytedb/Makefile
@@ -0,0 +1,5 @@
+include ../../commons-test.mk
+
+.PHONY: test
+test:
+ $(MAKE) test-yugabytedb
diff --git a/modules/yugabytedb/examples_test.go b/modules/yugabytedb/examples_test.go
new file mode 100644
index 0000000000..641fc5a53f
--- /dev/null
+++ b/modules/yugabytedb/examples_test.go
@@ -0,0 +1,155 @@
+package yugabytedb_test
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "log"
+ "net"
+
+ _ "github.com/lib/pq"
+ "github.com/yugabyte/gocql"
+
+ "github.com/testcontainers/testcontainers-go"
+ "github.com/testcontainers/testcontainers-go/modules/yugabytedb"
+)
+
+func ExampleRun() {
+ // runyugabyteDBContainer {
+ ctx := context.Background()
+
+ yugabytedbContainer, err := yugabytedb.Run(
+ ctx,
+ "yugabytedb/yugabyte:2024.1.3.0-b105",
+ yugabytedb.WithKeyspace("custom-keyspace"),
+ yugabytedb.WithUser("custom-user"),
+ yugabytedb.WithDatabaseName("custom-db"),
+ yugabytedb.WithDatabaseUser("custom-user"),
+ yugabytedb.WithDatabasePassword("custom-password"),
+ )
+ if err != nil {
+ log.Printf("failed to start container: %s", err)
+ return
+ }
+
+ defer func() {
+ if err := testcontainers.TerminateContainer(yugabytedbContainer); err != nil {
+ log.Printf("failed to terminate container: %s", err)
+ }
+ }()
+ // }
+
+ state, err := yugabytedbContainer.State(ctx)
+ if err != nil {
+ log.Printf("failed to get container state: %s", err)
+ return
+ }
+
+ fmt.Println(state.Running)
+
+ // Output: true
+}
+
+func ExampleContainer_YSQLConnectionString() {
+ ctx := context.Background()
+
+ yugabytedbContainer, err := yugabytedb.Run(
+ ctx,
+ "yugabytedb/yugabyte:2024.1.3.0-b105",
+ )
+ if err != nil {
+ log.Printf("failed to start container: %s", err)
+ return
+ }
+
+ defer func() {
+ if err := testcontainers.TerminateContainer(yugabytedbContainer); err != nil {
+ log.Printf("failed to terminate container: %s", err)
+ }
+ }()
+
+ connStr, err := yugabytedbContainer.YSQLConnectionString(ctx, "sslmode=disable")
+ if err != nil {
+ log.Printf("failed to get connection string: %s", err)
+ return
+ }
+
+ db, err := sql.Open("postgres", connStr)
+ if err != nil {
+ log.Printf("failed to open connection: %s", err)
+ return
+ }
+
+ defer db.Close()
+
+ var i int
+ row := db.QueryRowContext(ctx, "SELECT 1")
+ if err := row.Scan(&i); err != nil {
+ log.Printf("failed to scan row: %s", err)
+ return
+ }
+
+ fmt.Println(i)
+
+ // Output: 1
+}
+
+func ExampleContainer_newCluster() {
+ ctx := context.Background()
+
+ yugabytedbContainer, err := yugabytedb.Run(
+ ctx,
+ "yugabytedb/yugabyte:2024.1.3.0-b105",
+ )
+ if err != nil {
+ log.Printf("failed to start container: %s", err)
+ return
+ }
+
+ defer func() {
+ if err := testcontainers.TerminateContainer(yugabytedbContainer); err != nil {
+ log.Printf("failed to terminate container: %s", err)
+ }
+ }()
+
+ yugabytedbContainerHost, err := yugabytedbContainer.Host(ctx)
+ if err != nil {
+ log.Printf("failed to get container host: %s", err)
+ return
+ }
+
+ yugabyteContainerPort, err := yugabytedbContainer.MappedPort(ctx, "9042/tcp")
+ if err != nil {
+ log.Printf("failed to get container port: %s", err)
+ return
+ }
+
+ cluster := gocql.NewCluster(net.JoinHostPort(yugabytedbContainerHost, yugabyteContainerPort.Port()))
+ cluster.Keyspace = "yugabyte"
+ cluster.Authenticator = gocql.PasswordAuthenticator{
+ Username: "yugabyte",
+ Password: "yugabyte",
+ }
+
+ session, err := cluster.CreateSession()
+ if err != nil {
+ log.Printf("failed to create session: %s", err)
+ return
+ }
+
+ defer session.Close()
+
+ var i int
+ if err := session.Query(`
+ SELECT COUNT(*)
+ FROM system_schema.keyspaces
+ WHERE keyspace_name = 'yugabyte'
+ `).Scan(&i); err != nil {
+ log.Printf("failed to scan row: %s", err)
+ return
+ }
+
+ fmt.Println(i)
+
+ // Output: 1
+}
diff --git a/modules/yugabytedb/go.mod b/modules/yugabytedb/go.mod
new file mode 100644
index 0000000000..53295d1d26
--- /dev/null
+++ b/modules/yugabytedb/go.mod
@@ -0,0 +1,65 @@
+module github.com/testcontainers/testcontainers-go/modules/yugabytedb
+
+go 1.22
+
+require (
+ github.com/lib/pq v1.10.9
+ github.com/stretchr/testify v1.9.0
+ github.com/testcontainers/testcontainers-go v0.33.0
+ github.com/yugabyte/gocql v1.6.0-yb-1
+)
+
+require (
+ dario.cat/mergo v1.0.0 // indirect
+ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+ github.com/Microsoft/go-winio v0.6.2 // indirect
+ github.com/cenkalti/backoff/v4 v4.2.1 // indirect
+ github.com/containerd/containerd v1.7.18 // indirect
+ github.com/containerd/log v0.1.0 // indirect
+ github.com/containerd/platforms v0.2.1 // indirect
+ github.com/cpuguy83/dockercfg v0.3.2 // indirect
+ github.com/davecgh/go-spew v1.1.1 // indirect
+ github.com/distribution/reference v0.6.0 // indirect
+ github.com/docker/docker v27.1.1+incompatible // indirect
+ github.com/docker/go-connections v0.5.0 // indirect
+ github.com/docker/go-units v0.5.0 // indirect
+ github.com/felixge/httpsnoop v1.0.4 // indirect
+ github.com/go-logr/logr v1.4.1 // indirect
+ github.com/go-logr/stdr v1.2.2 // indirect
+ github.com/go-ole/go-ole v1.2.6 // indirect
+ github.com/gogo/protobuf v1.3.2 // indirect
+ github.com/golang/snappy v0.0.3 // indirect
+ github.com/google/uuid v1.6.0 // indirect
+ github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
+ github.com/klauspost/compress v1.17.4 // indirect
+ github.com/kr/text v0.2.0 // indirect
+ github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 // indirect
+ github.com/magiconair/properties v1.8.7 // indirect
+ github.com/moby/docker-image-spec v1.3.1 // indirect
+ github.com/moby/patternmatcher v0.6.0 // indirect
+ github.com/moby/sys/sequential v0.5.0 // indirect
+ github.com/moby/sys/user v0.1.0 // indirect
+ github.com/moby/term v0.5.0 // indirect
+ github.com/morikuni/aec v1.0.0 // indirect
+ github.com/opencontainers/go-digest v1.0.0 // indirect
+ github.com/opencontainers/image-spec v1.1.0 // indirect
+ github.com/pkg/errors v0.9.1 // indirect
+ github.com/pmezard/go-difflib v1.0.0 // indirect
+ github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c // indirect
+ github.com/shirou/gopsutil/v3 v3.23.12 // indirect
+ github.com/shoenig/go-m1cpu v0.1.6 // indirect
+ github.com/sirupsen/logrus v1.9.3 // indirect
+ github.com/tklauser/go-sysconf v0.3.12 // indirect
+ github.com/tklauser/numcpus v0.6.1 // indirect
+ github.com/yusufpapurcu/wmi v1.2.3 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 // indirect
+ go.opentelemetry.io/otel v1.24.0 // indirect
+ go.opentelemetry.io/otel/metric v1.24.0 // indirect
+ go.opentelemetry.io/otel/trace v1.24.0 // indirect
+ golang.org/x/crypto v0.22.0 // indirect
+ golang.org/x/sys v0.21.0 // indirect
+ gopkg.in/inf.v0 v0.9.1 // indirect
+ gopkg.in/yaml.v3 v3.0.1 // indirect
+)
+
+replace github.com/testcontainers/testcontainers-go => ../..
diff --git a/modules/yugabytedb/go.sum b/modules/yugabytedb/go.sum
new file mode 100644
index 0000000000..188e4bba97
--- /dev/null
+++ b/modules/yugabytedb/go.sum
@@ -0,0 +1,209 @@
+dario.cat/mergo v1.0.0 h1:AGCNq9Evsj31mOgNPcLyXc+4PNABt905YmuqPYYpBWk=
+dario.cat/mergo v1.0.0/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
+github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
+github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
+github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
+github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932 h1:mXoPYz/Ul5HYEDvkta6I8/rnYM5gSdSV2tJ6XbZuEtY=
+github.com/bitly/go-hostpool v0.0.0-20171023180738-a3a6125de932/go.mod h1:NOuUCSz6Q9T7+igc/hlvDOUdtWKryOrtFyIVABv/p7k=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869 h1:DDGfHa7BWjL4YnC6+E63dPcxHo2sUxDIu8g3QgEJdRY=
+github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
+github.com/cenkalti/backoff/v4 v4.2.1 h1:y4OZtCnogmCPw98Zjyt5a6+QwPLGkiQsYW5oUqylYbM=
+github.com/cenkalti/backoff/v4 v4.2.1/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
+github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao=
+github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4=
+github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
+github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
+github.com/containerd/platforms v0.2.1 h1:zvwtM3rz2YHPQsF2CHYM8+KtB5dvhISiXh5ZpSBQv6A=
+github.com/containerd/platforms v0.2.1/go.mod h1:XHCb+2/hzowdiut9rkudds9bE5yJ7npe7dG/wG+uFPw=
+github.com/cpuguy83/dockercfg v0.3.2 h1:DlJTyZGBDlXqUZ2Dk2Q3xHs/FtnooJJVaad2S9GKorA=
+github.com/cpuguy83/dockercfg v0.3.2/go.mod h1:sugsbF4//dDlL/i+S+rtpIWp+5h0BHJHfjj5/jFyUJc=
+github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
+github.com/creack/pty v1.1.18 h1:n56/Zwd5o6whRC5PMGretI4IdRLlmBXYNjScPaBgsbY=
+github.com/creack/pty v1.1.18/go.mod h1:MOBLtS5ELjhRRrroQr9kyvTxUAFNvYEK993ew/Vr4O4=
+github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
+github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
+github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
+github.com/docker/docker v27.1.1+incompatible h1:hO/M4MtV36kzKldqnA37IWhebRA+LnqqcqDja6kVaKY=
+github.com/docker/docker v27.1.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/go-connections v0.5.0 h1:USnMq7hx7gwdVZq1L49hLXaFtUdTADjXGp+uj1Br63c=
+github.com/docker/go-connections v0.5.0/go.mod h1:ov60Kzw0kKElRwhNs9UlUHAE/F9Fe6GLaXnqyDdmEXc=
+github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
+github.com/docker/go-units v0.5.0/go.mod h1:fgPhTUdO+D/Jk86RDLlptpiXQzgHJF7gydDDbaIK4Dk=
+github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
+github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
+github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
+github.com/go-logr/logr v1.4.1 h1:pKouT5E8xu9zeFC39JXRDukb6JFQPXM5p5I91188VAQ=
+github.com/go-logr/logr v1.4.1/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
+github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
+github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
+github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
+github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
+github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
+github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
+github.com/golang/snappy v0.0.3 h1:fHPg5GQYlCeLIPB9BZqMVR5nR9A+IM5zcgeTdjMYmLA=
+github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
+github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
+github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
+github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
+github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
+github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0 h1:YBftPWNWd4WwGqtY2yeZL2ef8rHAxPBD8KFhJpmcqms=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.16.0/go.mod h1:YN5jB8ie0yfIUg6VvR9Kz84aCaG7AsGZnLjhHbUqwPg=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
+github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
+github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
+github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
+github.com/klauspost/compress v1.17.4 h1:Ej5ixsIri7BrIjBkRZLTo6ghwrEtHFk7ijlczPW4fZ4=
+github.com/klauspost/compress v1.17.4/go.mod h1:/dCuZOvVtNoHsyb+cuJD3itjs3NbnF6KH9zAO4BDxPM=
+github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
+github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
+github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
+github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
+github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
+github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
+github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
+github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
+github.com/lib/pq v1.10.9/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0 h1:6E+4a0GO5zZEnZ81pIr0yLvtUWk2if982qA3F3QD6H4=
+github.com/lufia/plan9stats v0.0.0-20211012122336-39d0f177ccd0/go.mod h1:zJYVVT2jmtg6P3p1VtQj7WsuWi/y4VnjVBn7F8KPB3I=
+github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0VQdvPDY=
+github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0=
+github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
+github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
+github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
+github.com/moby/patternmatcher v0.6.0/go.mod h1:hDPoyOpDY7OrrMDLaYoY3hf52gNCR/YOUYxkhApJIxc=
+github.com/moby/sys/sequential v0.5.0 h1:OPvI35Lzn9K04PBbCLW0g4LcFAJgHsvXsRyewg5lXtc=
+github.com/moby/sys/sequential v0.5.0/go.mod h1:tH2cOOs5V9MlPiXcQzRC+eEyab644PWKGRYaaV5ZZlo=
+github.com/moby/sys/user v0.1.0 h1:WmZ93f5Ux6het5iituh9x2zAG7NFY9Aqi49jjE1PaQg=
+github.com/moby/sys/user v0.1.0/go.mod h1:fKJhFOnsCN6xZ5gSfbM6zaHGgDJMrqt9/reuj4T7MmU=
+github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
+github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
+github.com/morikuni/aec v1.0.0 h1:nP9CBfwrvYnBRgY6qfDQkygYDmYwOilePFkwzv4dU8A=
+github.com/morikuni/aec v1.0.0/go.mod h1:BbKIizmSmc5MMPqRYbxO4ZU0S0+P200+tUnFx7PXmsc=
+github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
+github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
+github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug=
+github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM=
+github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
+github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
+github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
+github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c h1:ncq/mPwQF4JjgDlrVEn3C11VoGHZN7m8qihwgMEtzYw=
+github.com/power-devops/perfstat v0.0.0-20210106213030-5aafc221ea8c/go.mod h1:OmDBASR4679mdNQnz2pUhc2G8CO2JrUAVFDRBDP/hJE=
+github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
+github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
+github.com/shirou/gopsutil/v3 v3.23.12 h1:z90NtUkp3bMtmICZKpC4+WaknU1eXtp5vtbQ11DgpE4=
+github.com/shirou/gopsutil/v3 v3.23.12/go.mod h1:1FrWgea594Jp7qmjHUUPlJDTPgcsb9mGnXDxavtikzM=
+github.com/shoenig/go-m1cpu v0.1.6 h1:nxdKQNcEB6vzgA2E2bvzKIYRuNj7XNJ4S/aRSwKzFtM=
+github.com/shoenig/go-m1cpu v0.1.6/go.mod h1:1JJMcUBvfNwpq05QDQVAnx3gUHr9IYF7GNg9SUEw2VQ=
+github.com/shoenig/test v0.6.4 h1:kVTaSd7WLz5WZ2IaoM0RSzRsUD+m8wRR+5qvntpn4LU=
+github.com/shoenig/test v0.6.4/go.mod h1:byHiCGXqrVaflBLAMq/srcZIHynQPQgeyvkvXnjqq0k=
+github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ=
+github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
+github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
+github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
+github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY=
+github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
+github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
+github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
+github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
+github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
+github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
+github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
+github.com/tklauser/go-sysconf v0.3.12 h1:0QaGUFOdQaIVdPgfITYzaTegZvdCjmYO52cSFAEVmqU=
+github.com/tklauser/go-sysconf v0.3.12/go.mod h1:Ho14jnntGE1fpdOqQEEaiKRpvIavV0hSfmBq8nJbHYI=
+github.com/tklauser/numcpus v0.6.1 h1:ng9scYS7az0Bk4OZLvrNXNSAO2Pxr1XXRAPyjhIx+Fk=
+github.com/tklauser/numcpus v0.6.1/go.mod h1:1XfjsgE2zo8GVw7POkMbHENHzVg3GzmoZ9fESEdAacY=
+github.com/yugabyte/gocql v1.6.0-yb-1 h1:3anNiHsJwKQ8Dn7RdmkTEuIzV1l7e9QJZ8wkOZ87ELg=
+github.com/yugabyte/gocql v1.6.0-yb-1/go.mod h1:LAokR6+vevDCrTxk52U7p6ki+4qELu4XU7JUGYa2O2M=
+github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74=
+github.com/yusufpapurcu/wmi v1.2.3 h1:E1ctvB7uKFMOJw3fdOW32DwGE9I7t++CRUEMKvFoFiw=
+github.com/yusufpapurcu/wmi v1.2.3/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0 h1:jq9TW8u3so/bN+JPT166wjOI6/vQPF6Xe7nMNIltagk=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.49.0/go.mod h1:p8pYQP+m5XfbZm9fxtSKAbM6oIllS7s2AfxrChvc7iw=
+go.opentelemetry.io/otel v1.24.0 h1:0LAOdjNmQeSTzGBzduGe/rU4tZhMwL5rWgtp9Ku5Jfo=
+go.opentelemetry.io/otel v1.24.0/go.mod h1:W7b9Ozg4nkF5tWI5zsXkaKKDjdVjpD4oAt9Qi/MArHo=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0 h1:Mne5On7VWdx7omSrSSZvM4Kw7cS7NQkOOmLcgscI51U=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.19.0/go.mod h1:IPtUMKL4O3tH5y+iXVyAXqpAwMuzC1IrxVS81rummfE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0 h1:IeMeyr1aBvBiPVYihXIaeIZba6b8E1bYp7lbdxK8CQg=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.19.0/go.mod h1:oVdCUtjq9MK9BlS7TtucsQwUcXcymNiEDjgDD2jMtZU=
+go.opentelemetry.io/otel/metric v1.24.0 h1:6EhoGWWK28x1fbpA4tYTOWBkPefTDQnb8WSGXlc88kI=
+go.opentelemetry.io/otel/metric v1.24.0/go.mod h1:VYhLe1rFfxuTXLgj4CBiyz+9WYBA8pNGJgDcSFRKBco=
+go.opentelemetry.io/otel/sdk v1.19.0 h1:6USY6zH+L8uMH8L3t1enZPR3WFEmSTADlqldyHtJi3o=
+go.opentelemetry.io/otel/sdk v1.19.0/go.mod h1:NedEbbS4w3C6zElbLdPJKOpJQOrGUJ+GfzpjUvI0v1A=
+go.opentelemetry.io/otel/trace v1.24.0 h1:CsKnnL4dUAr/0llH9FKuc698G04IrpWV0MQA/Y1YELI=
+go.opentelemetry.io/otel/trace v1.24.0/go.mod h1:HPc3Xr/cOApsBI154IU0OI0HJexz+aw5uPdbs3UCjNU=
+go.opentelemetry.io/proto/otlp v1.0.0 h1:T0TX0tmXU8a3CbNXzEKGeU5mIVOdf0oykP+u2lIVU/I=
+go.opentelemetry.io/proto/otlp v1.0.0/go.mod h1:Sy6pihPLfYHkr3NkUbEhGHFhINUSI/v80hjKIs5JXpM=
+golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
+golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
+golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
+golang.org/x/crypto v0.22.0 h1:g1v0xeRhjcugydODzvb3mEM9SQ0HGp9s/nh3COQ/C30=
+golang.org/x/crypto v0.22.0/go.mod h1:vr6Su+7cTlO45qkww3VDJlzDn0ctJvRgYbC2NvXHt+M=
+golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
+golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
+golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
+golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
+golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
+golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
+golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
+golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
+golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20201204225414-ed752295db88/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
+golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.15.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/sys v0.21.0 h1:rF+pYz3DAGSQAxAu1CbC7catZg4ebC4UIeIhKxBZvws=
+golang.org/x/sys v0.21.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
+golang.org/x/term v0.19.0 h1:+ThwsDv+tYfnJFhF4L8jITxu1tdTWRTZpdsWgEgjL6Q=
+golang.org/x/term v0.19.0/go.mod h1:2CuTdWZ7KHSQwUzKva0cbMg6q2DMI3Mmxp+gKJbskEk=
+golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
+golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
+golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
+golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8 h1:vVKdlvoWBphwdxWKrFZEuM0kGgGLxUOYcY4U/2Vjg44=
+golang.org/x/time v0.0.0-20220210224613-90d013bbcef8/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
+golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
+golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
+golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
+golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
+golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
+google.golang.org/genproto v0.0.0-20230920204549-e6e6cdab5c13 h1:vlzZttNJGVqTsRFU9AmdnrcO1Znh8Ew9kCD//yjigk0=
+google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb h1:lK0oleSc7IQsUxO3U5TjL9DWlsxpEBemh+zpB7IqhWI=
+google.golang.org/genproto/googleapis/api v0.0.0-20230913181813-007df8e322eb/go.mod h1:KjSP20unUpOx5kyQUFa7k4OJg0qeJ7DEZflGDu2p6Bk=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97 h1:6GQBEOdGkX6MMTLT9V+TjtIRZCw9VPD5Z+yHY9wMgS0=
+google.golang.org/genproto/googleapis/rpc v0.0.0-20231002182017-d307bd883b97/go.mod h1:v7nGkzlmW8P3n/bKmWBn2WpBjpOEx8Q6gMueudAmKfY=
+google.golang.org/grpc v1.64.1 h1:LKtvyfbX3UGVPFcGqJ9ItpVWW6oN/2XqTxfAnwRRXiA=
+google.golang.org/grpc v1.64.1/go.mod h1:hiQF4LFZelK2WKaP6W0L92zGHtiQdZxk8CrSdvyjeP0=
+google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
+google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
+gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
+gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
+gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
+gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
+gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
+gotest.tools/v3 v3.5.1 h1:EENdUnS3pdur5nybKYIh2Vfgc8IUNBjxDPSjtiJcOzU=
+gotest.tools/v3 v3.5.1/go.mod h1:isy3WKz7GK6uNw/sbHzfKBLvlvXwUyV06n6brMxxopU=
diff --git a/modules/yugabytedb/options.go b/modules/yugabytedb/options.go
new file mode 100644
index 0000000000..485b979468
--- /dev/null
+++ b/modules/yugabytedb/options.go
@@ -0,0 +1,53 @@
+package yugabytedb
+
+import (
+ "github.com/testcontainers/testcontainers-go"
+)
+
+// WithDatabaseName sets the initial database name for the yugabyteDB container.
+func WithDatabaseName(dbName string) testcontainers.CustomizeRequestOption {
+ return func(req *testcontainers.GenericContainerRequest) error {
+ req.Env[ysqlDatabaseNameEnv] = dbName
+ return nil
+ }
+}
+
+// WithDatabaseUser sets the initial database user for the yugabyteDB container.
+func WithDatabaseUser(dbUser string) testcontainers.CustomizeRequestOption {
+ return func(req *testcontainers.GenericContainerRequest) error {
+ req.Env[ysqlDatabaseUserEnv] = dbUser
+ return nil
+ }
+}
+
+// WithDatabasePassword sets the initial database password for the yugabyteDB container.
+func WithDatabasePassword(dbPassword string) testcontainers.CustomizeRequestOption {
+ return func(req *testcontainers.GenericContainerRequest) error {
+ req.Env[ysqlDatabasePasswordEnv] = dbPassword
+ return nil
+ }
+}
+
+// WithKeyspace sets the initial keyspace for the yugabyteDB container.
+func WithKeyspace(keyspace string) testcontainers.CustomizeRequestOption {
+ return func(req *testcontainers.GenericContainerRequest) error {
+ req.Env[ycqlKeyspaceEnv] = keyspace
+ return nil
+ }
+}
+
+// WithUser sets the initial user for the yugabyteDB container.
+func WithUser(user string) testcontainers.CustomizeRequestOption {
+ return func(req *testcontainers.GenericContainerRequest) error {
+ req.Env[ycqlUserNameEnv] = user
+ return nil
+ }
+}
+
+// WithPassword sets the initial password for the yugabyteDB container.
+func WithPassword(password string) testcontainers.CustomizeRequestOption {
+ return func(req *testcontainers.GenericContainerRequest) error {
+ req.Env[ycqlPasswordEnv] = password
+ return nil
+ }
+}
diff --git a/modules/yugabytedb/yugabytedb.go b/modules/yugabytedb/yugabytedb.go
new file mode 100644
index 0000000000..13d6e9ccb0
--- /dev/null
+++ b/modules/yugabytedb/yugabytedb.go
@@ -0,0 +1,126 @@
+package yugabytedb
+
+import (
+ "context"
+ "fmt"
+ "net"
+ "strings"
+
+ "github.com/testcontainers/testcontainers-go"
+ "github.com/testcontainers/testcontainers-go/wait"
+)
+
+const (
+ ycqlPort = "9042/tcp"
+
+ ycqlKeyspaceEnv = "YCQL_KEYSPACE"
+ ycqlUserNameEnv = "YCQL_USER"
+ ycqlPasswordEnv = "YCQL_PASSWORD"
+
+ ycqlKeyspace = "yugabyte"
+ ycqlUserName = "yugabyte"
+ ycqlPassword = "yugabyte"
+)
+
+const (
+ ysqlPort = "5433/tcp"
+
+ ysqlDatabaseNameEnv = "YSQL_DB"
+ ysqlDatabaseUserEnv = "YSQL_USER"
+ ysqlDatabasePasswordEnv = "YSQL_PASSWORD"
+
+ ysqlDatabaseName = "yugabyte"
+ ysqlDatabaseUser = "yugabyte"
+ ysqlDatabasePassword = "yugabyte"
+)
+
+// Container represents the yugabyteDB container type used in the module
+type Container struct {
+ testcontainers.Container
+
+ ysqlDatabaseName string
+ ysqlDatabaseUser string
+ ysqlDatabasePassword string
+}
+
+// Run creates an instance of the yugabyteDB container type and automatically starts it.
+// A default configuration is used for the container, but it can be customized using the
+// provided options.
+// When using default configuration values it is recommended to use the provided
+// [*Container.YSQLConnectionString] and [*Container.YCQLConfigureClusterConfig]
+// methods to use the container in their respective clients.
+func Run(ctx context.Context, img string, opts ...testcontainers.ContainerCustomizer) (*Container, error) {
+ req := testcontainers.ContainerRequest{
+ Image: img,
+ Cmd: []string{"bin/yugabyted", "start", "--background=false"},
+ WaitingFor: wait.ForAll(
+ wait.ForLog("YugabyteDB Started").WithOccurrence(1),
+ wait.ForLog("Data placement constraint successfully verified").WithOccurrence(1),
+ wait.ForListeningPort(ysqlPort),
+ wait.ForListeningPort(ycqlPort),
+ ),
+ ExposedPorts: []string{ycqlPort, ysqlPort},
+ Env: map[string]string{
+ ycqlKeyspaceEnv: ycqlKeyspace,
+ ycqlUserNameEnv: ycqlUserName,
+ ycqlPasswordEnv: ycqlPassword,
+ ysqlDatabaseNameEnv: ysqlDatabaseName,
+ ysqlDatabaseUserEnv: ysqlDatabaseUser,
+ ysqlDatabasePasswordEnv: ysqlDatabasePassword,
+ },
+ }
+
+ genericContainerReq := testcontainers.GenericContainerRequest{
+ ContainerRequest: req,
+ Started: true,
+ }
+
+ for _, opt := range opts {
+ if err := opt.Customize(&genericContainerReq); err != nil {
+ return nil, fmt.Errorf("customize: %w", err)
+ }
+ }
+
+ container, err := testcontainers.GenericContainer(ctx, genericContainerReq)
+ var c *Container
+ if container != nil {
+ c = &Container{
+ Container: container,
+ ysqlDatabaseName: req.Env[ysqlDatabaseNameEnv],
+ ysqlDatabaseUser: req.Env[ysqlDatabaseUserEnv],
+ ysqlDatabasePassword: req.Env[ysqlDatabasePasswordEnv],
+ }
+ }
+
+ if err != nil {
+ return c, fmt.Errorf("generic container: %w", err)
+ }
+
+ return c, nil
+}
+
+// YSQLConnectionString returns a connection string for the yugabyteDB container
+// using the configured database name, user, password, port, host and additional
+// arguments.
+// Additional arguments are appended to the connection string as query parameters
+// in the form of key=value pairs separated by "&".
+func (y *Container) YSQLConnectionString(ctx context.Context, args ...string) (string, error) {
+ host, err := y.Host(ctx)
+ if err != nil {
+ return "", fmt.Errorf("host: %w", err)
+ }
+
+ mappedPort, err := y.MappedPort(ctx, ysqlPort)
+ if err != nil {
+ return "", fmt.Errorf("mapped port: %w", err)
+ }
+
+ return fmt.Sprintf(
+ "postgres://%s:%s@%s/%s?%s",
+ y.ysqlDatabaseUser,
+ y.ysqlDatabasePassword,
+ net.JoinHostPort(host, mappedPort.Port()),
+ y.ysqlDatabaseName,
+ strings.Join(args, "&"),
+ ), nil
+}
diff --git a/modules/yugabytedb/yugabytedb_test.go b/modules/yugabytedb/yugabytedb_test.go
new file mode 100644
index 0000000000..38a93f0c89
--- /dev/null
+++ b/modules/yugabytedb/yugabytedb_test.go
@@ -0,0 +1,129 @@
+package yugabytedb_test
+
+import (
+ "context"
+ "database/sql"
+ "fmt"
+ "net"
+ "testing"
+
+ _ "github.com/lib/pq"
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ "github.com/yugabyte/gocql"
+
+ "github.com/testcontainers/testcontainers-go"
+ "github.com/testcontainers/testcontainers-go/modules/yugabytedb"
+)
+
+func TestYugabyteDB_YSQL(t *testing.T) {
+ t.Run("Run", func(t *testing.T) {
+ ctx := context.Background()
+
+ ctr, err := yugabytedb.Run(ctx, "yugabytedb/yugabyte:2024.1.3.0-b105")
+ testcontainers.CleanupContainer(t, ctr)
+ require.NoError(t, err)
+
+ ctrHost, err := ctr.Host(ctx)
+ require.NoError(t, err)
+
+ ctrPort, err := ctr.MappedPort(ctx, "5433/tcp")
+ require.NoError(t, err)
+
+ ysqlConnStr, err := ctr.YSQLConnectionString(ctx, "sslmode=disable")
+ require.NoError(t, err)
+ assert.Equal(t, fmt.Sprintf("postgres://yugabyte:yugabyte@%s:%s/yugabyte?sslmode=disable", ctrHost, ctrPort.Port()), ysqlConnStr)
+
+ db, err := sql.Open("postgres", ysqlConnStr)
+ require.NoError(t, err)
+ require.NotNil(t, db)
+
+ err = db.Ping()
+ require.NoError(t, err)
+ })
+
+ t.Run("custom-options", func(t *testing.T) {
+ ctx := context.Background()
+ ctr, err := yugabytedb.Run(ctx, "yugabytedb/yugabyte:2024.1.3.0-b105",
+ yugabytedb.WithDatabaseName("custom-db"),
+ yugabytedb.WithDatabaseUser("custom-user"),
+ yugabytedb.WithDatabasePassword("custom-password"),
+ )
+ testcontainers.CleanupContainer(t, ctr)
+ require.NoError(t, err)
+
+ ctrHost, err := ctr.Host(ctx)
+ require.NoError(t, err)
+
+ ctrPort, err := ctr.MappedPort(ctx, "5433/tcp")
+ require.NoError(t, err)
+
+ ysqlConnStr, err := ctr.YSQLConnectionString(ctx, "sslmode=disable")
+ require.NoError(t, err)
+ assert.Equal(t, fmt.Sprintf("postgres://custom-user:custom-password@%s:%s/custom-db?sslmode=disable", ctrHost, ctrPort.Port()), ysqlConnStr)
+
+ db, err := sql.Open("postgres", ysqlConnStr)
+ require.NoError(t, err)
+ require.NotNil(t, db)
+
+ err = db.Ping()
+ require.NoError(t, err)
+ })
+}
+
+func TestYugabyteDB_YCQL(t *testing.T) {
+ t.Run("Run", func(t *testing.T) {
+ ctx := context.Background()
+
+ ctr, err := yugabytedb.Run(ctx, "yugabytedb/yugabyte:2024.1.3.0-b105")
+ testcontainers.CleanupContainer(t, ctr)
+ require.NoError(t, err)
+
+ ctrHost, err := ctr.Host(ctx)
+ require.NoError(t, err)
+
+ ctrPort, err := ctr.MappedPort(ctx, "9042/tcp")
+ require.NoError(t, err)
+
+ cluster := gocql.NewCluster(net.JoinHostPort(ctrHost, ctrPort.Port()))
+ cluster.Keyspace = "yugabyte"
+ cluster.Authenticator = gocql.PasswordAuthenticator{
+ Username: "yugabyte",
+ Password: "yugabyte",
+ }
+
+ session, err := cluster.CreateSession()
+ require.NoError(t, err)
+ session.Close()
+ })
+
+ t.Run("custom-options", func(t *testing.T) {
+ ctx := context.Background()
+
+ ctr, err := yugabytedb.Run(ctx, "yugabytedb/yugabyte:2024.1.3.0-b105",
+ yugabytedb.WithKeyspace("custom-keyspace"),
+ yugabytedb.WithUser("custom-user"),
+ yugabytedb.WithPassword("custom-password"),
+ )
+
+ testcontainers.CleanupContainer(t, ctr)
+ require.NoError(t, err)
+
+ ctrHost, err := ctr.Host(ctx)
+ require.NoError(t, err)
+
+ ctrPort, err := ctr.MappedPort(ctx, "9042/tcp")
+ require.NoError(t, err)
+
+ cluster := gocql.NewCluster(net.JoinHostPort(ctrHost, ctrPort.Port()))
+ cluster.Keyspace = "custom-keyspace"
+ cluster.Authenticator = gocql.PasswordAuthenticator{
+ Username: "custom-user",
+ Password: "custom-password",
+ }
+
+ session, err := cluster.CreateSession()
+ require.NoError(t, err)
+ session.Close()
+ })
+}
diff --git a/options_test.go b/options_test.go
index c8a67b0b06..dbbb2964d6 100644
--- a/options_test.go
+++ b/options_test.go
@@ -115,8 +115,8 @@ func TestWithStartupCommand(t *testing.T) {
err := testcontainers.WithStartupCommand(testExec)(&req)
require.NoError(t, err)
- assert.Len(t, req.LifecycleHooks, 1)
- assert.Len(t, req.LifecycleHooks[0].PostStarts, 1)
+ require.Len(t, req.LifecycleHooks, 1)
+ require.Len(t, req.LifecycleHooks[0].PostStarts, 1)
c, err := testcontainers.GenericContainer(context.Background(), req)
testcontainers.CleanupContainer(t, c)
@@ -144,8 +144,8 @@ func TestWithAfterReadyCommand(t *testing.T) {
err := testcontainers.WithAfterReadyCommand(testExec)(&req)
require.NoError(t, err)
- assert.Len(t, req.LifecycleHooks, 1)
- assert.Len(t, req.LifecycleHooks[0].PostReadies, 1)
+ require.Len(t, req.LifecycleHooks, 1)
+ require.Len(t, req.LifecycleHooks[0].PostReadies, 1)
c, err := testcontainers.GenericContainer(context.Background(), req)
testcontainers.CleanupContainer(t, c)
diff --git a/reaper.go b/reaper.go
index 9d74f573e2..f435ec5122 100644
--- a/reaper.go
+++ b/reaper.go
@@ -1,13 +1,16 @@
package testcontainers
import (
- "bufio"
+ "bytes"
"context"
+ "errors"
"fmt"
- "math/rand"
+ "io"
"net"
+ "os"
"strings"
"sync"
+ "syscall"
"time"
"github.com/cenkalti/backoff/v4"
@@ -34,9 +37,23 @@ const (
var (
// Deprecated: it has been replaced by an internal value
ReaperDefaultImage = config.ReaperDefaultImage
- reaperInstance *Reaper // We would like to create reaper only once
- reaperMutex sync.Mutex
- reaperOnce sync.Once
+
+ // defaultReaperPort is the default port that the reaper listens on if not
+ // overridden by the RYUK_PORT environment variable.
+ defaultReaperPort = nat.Port("8080/tcp")
+
+ // errReaperNotFound is returned when no reaper container is found.
+ errReaperNotFound = errors.New("reaper not found")
+
+ // errReaperDisabled is returned if a reaper is requested but the
+ // config has it disabled.
+ errReaperDisabled = errors.New("reaper disabled")
+
+ // spawner is the singleton instance of reaperSpawner.
+ spawner = &reaperSpawner{}
+
+ // reaperAck is the expected response from the reaper container.
+ reaperAck = []byte("ACK\n")
)
// ReaperProvider represents a provider for the reaper to run itself with
@@ -47,10 +64,18 @@ type ReaperProvider interface {
}
// NewReaper creates a Reaper with a sessionID to identify containers and a provider to use
-// Deprecated: it's not possible to create a reaper anymore. Compose module uses this method
+// Deprecated: it's not possible to create a reaper any more. Compose module uses this method
// to create a reaper for the compose stack.
+//
+// The caller must call Connect at least once on the returned Reaper and use the returned
+// result, otherwise the reaper will be kept open until the process exits.
func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, reaperImageName string) (*Reaper, error) {
- return reuseOrCreateReaper(ctx, sessionID, provider)
+ reaper, err := spawner.reaper(ctx, sessionID, provider)
+ if err != nil {
+ return nil, fmt.Errorf("reaper: %w", err)
+ }
+
+ return reaper, nil
}
// reaperContainerNameFromSessionID returns the container name that uniquely
@@ -61,31 +86,80 @@ func reaperContainerNameFromSessionID(sessionID string) string {
return fmt.Sprintf("reaper_%s", sessionID)
}
-// lookUpReaperContainer returns a DockerContainer type with the reaper container in the case
-// it's found in the running state, and including the labels for sessionID, reaper, and ryuk.
-// It will perform a retry with exponential backoff to allow for the container to be started and
-// avoid potential false negatives.
-func lookUpReaperContainer(ctx context.Context, sessionID string) (*DockerContainer, error) {
- dockerClient, err := NewDockerClientWithOpts(ctx)
- if err != nil {
- return nil, err
+// reaperSpawner is a singleton that manages the reaper container.
+type reaperSpawner struct {
+ instance *Reaper
+ mtx sync.Mutex
+}
+
+// port returns the port that a new reaper should listen on.
+func (r *reaperSpawner) port() nat.Port {
+ if port := os.Getenv("RYUK_PORT"); port != "" {
+ natPort, err := nat.NewPort("tcp", port)
+ if err != nil {
+ panic(fmt.Sprintf("invalid RYUK_PORT value %q: %s", port, err))
+ }
+ return natPort
}
- defer dockerClient.Close()
- // the backoff will take at most 5 seconds to find the reaper container
- // doing each attempt every 100ms
- exp := backoff.NewExponentialBackOff()
+ return defaultReaperPort
+}
- // we want random intervals between 100ms and 500ms for concurrent executions
+// backoff returns a backoff policy for the reaper spawner.
+// It will take at most 20 seconds, doing each attempt every 100ms - 250ms.
+func (r *reaperSpawner) backoff() *backoff.ExponentialBackOff {
+ // We want random intervals between 100ms and 250ms for concurrent executions
// to not be synchronized: it could be the case that multiple executions of this
// function happen at the same time (specifically when called from a different test
// process execution), and we want to avoid that they all try to find the reaper
// container at the same time.
- exp.InitialInterval = time.Duration(rand.Intn(5)*100) * time.Millisecond
- exp.RandomizationFactor = rand.Float64() * 0.5
- exp.Multiplier = rand.Float64() * 2.0
- exp.MaxInterval = 5.0 * time.Second // max interval between attempts
- exp.MaxElapsedTime = 1 * time.Minute // max time to keep trying
+ b := &backoff.ExponentialBackOff{
+ InitialInterval: time.Millisecond * 100,
+ RandomizationFactor: backoff.DefaultRandomizationFactor,
+ Multiplier: backoff.DefaultMultiplier,
+ // Adjust MaxInterval to compensate for randomization factor which can be added to
+ // returned interval so we have a maximum of 250ms.
+ MaxInterval: time.Duration(float64(time.Millisecond*250) * backoff.DefaultRandomizationFactor),
+ MaxElapsedTime: time.Second * 20,
+ Stop: backoff.Stop,
+ Clock: backoff.SystemClock,
+ }
+ b.Reset()
+
+ return b
+}
+
+// cleanup terminates the reaper container if set.
+func (r *reaperSpawner) cleanup() error {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ return r.cleanupLocked()
+}
+
+// cleanupLocked terminates the reaper container if set.
+// It must be called with the lock held.
+func (r *reaperSpawner) cleanupLocked() error {
+ if r.instance == nil {
+ return nil
+ }
+
+ err := TerminateContainer(r.instance.container)
+ r.instance = nil
+
+ return err
+}
+
+// lookupContainer returns a DockerContainer type with the reaper container in the case
+// it's found in the running state, and including the labels for sessionID, reaper, and ryuk.
+// It will perform a retry with exponential backoff to allow for the container to be started and
+// avoid potential false negatives.
+func (r *reaperSpawner) lookupContainer(ctx context.Context, sessionID string) (*DockerContainer, error) {
+ dockerClient, err := NewDockerClientWithOpts(ctx)
+ if err != nil {
+ return nil, fmt.Errorf("new client: %w", err)
+ }
+ defer dockerClient.Close()
opts := container.ListOptions{
All: true,
@@ -97,159 +171,212 @@ func lookUpReaperContainer(ctx context.Context, sessionID string) (*DockerContai
),
}
- return backoff.RetryNotifyWithData(
+ return backoff.RetryWithData(
func() (*DockerContainer, error) {
resp, err := dockerClient.ContainerList(ctx, opts)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("container list: %w", err)
}
if len(resp) == 0 {
- // reaper container not found in the running state: do not look for it again
- return nil, nil
+ // No reaper container found.
+ return nil, backoff.Permanent(errReaperNotFound)
}
if len(resp) > 1 {
- return nil, fmt.Errorf("not possible to have multiple reaper containers found for session ID %s", sessionID)
+ return nil, fmt.Errorf("multiple reaper containers found for session ID %s", sessionID)
}
- r, err := containerFromDockerResponse(ctx, resp[0])
+ container := resp[0]
+ r, err := containerFromDockerResponse(ctx, container)
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("from docker: %w", err)
}
- if r.healthStatus == types.Healthy || r.healthStatus == types.NoHealthcheck {
+ switch {
+ case r.healthStatus == types.Healthy,
+ r.healthStatus == types.NoHealthcheck:
return r, nil
- }
-
- // if a health status is present on the container, and the container is healthy, error
- if r.healthStatus != "" {
- return nil, fmt.Errorf("container %s is not healthy, wanted status=%s, got status=%s", resp[0].ID[:8], types.Healthy, r.healthStatus)
+ case r.healthStatus != "":
+ return nil, fmt.Errorf("container not healthy: %s", r.healthStatus)
}
return r, nil
},
- backoff.WithContext(exp, ctx),
- func(err error, duration time.Duration) {
- Logger.Printf("Error looking up reaper container, will retry: %v", err)
- },
+ backoff.WithContext(r.backoff(), ctx),
)
}
-// reuseOrCreateReaper returns an existing Reaper instance if it exists and is running. Otherwise, a new Reaper instance
-// will be created with a sessionID to identify containers in the same test session/program.
-func reuseOrCreateReaper(ctx context.Context, sessionID string, provider ReaperProvider) (*Reaper, error) {
- reaperMutex.Lock()
- defer reaperMutex.Unlock()
-
- // 1. if the reaper instance has been already created, return it
- if reaperInstance != nil {
- // Verify this instance is still running by checking state.
- // Can't use Container.IsRunning because the bool is not updated when Reaper is terminated
- state, err := reaperInstance.container.State(ctx)
- if err != nil {
- if !errdefs.IsNotFound(err) {
- return nil, err
+// isRunning returns an error if the container is not running.
+func (r *reaperSpawner) isRunning(ctx context.Context, ctr Container) error {
+ state, err := ctr.State(ctx)
+ if err != nil {
+ return fmt.Errorf("container state: %w", err)
+ }
+
+ if !state.Running {
+ // Use NotFound error to indicate the container is not running
+ // and should be recreated.
+ return errdefs.NotFound(fmt.Errorf("container state: %s", state.Status))
+ }
+
+ return nil
+}
+
+// retryError returns a permanent error if the error is not considered retryable.
+func (r *reaperSpawner) retryError(err error) error {
+ var timeout interface {
+ Timeout() bool
+ }
+ switch {
+ case isCleanupSafe(err),
+ createContainerFailDueToNameConflictRegex.MatchString(err.Error()),
+ errors.Is(err, syscall.ECONNREFUSED),
+ errors.Is(err, syscall.ECONNRESET),
+ errors.Is(err, syscall.ECONNABORTED),
+ errors.Is(err, syscall.ETIMEDOUT),
+ errors.Is(err, os.ErrDeadlineExceeded),
+ errors.As(err, &timeout) && timeout.Timeout(),
+ errors.Is(err, context.DeadlineExceeded),
+ errors.Is(err, context.Canceled):
+ // Retryable error.
+ return err
+ default:
+ return backoff.Permanent(err)
+ }
+}
+
+// reaper returns an existing Reaper instance if it exists and is running, otherwise
+// a new Reaper instance will be created with a sessionID to identify containers in
+// the same test session/program.
+//
+// Returns an error if config.RyukDisabled is true.
+//
+// Safe for concurrent calls.
+func (r *reaperSpawner) reaper(ctx context.Context, sessionID string, provider ReaperProvider) (*Reaper, error) {
+ if config.Read().RyukDisabled {
+ return nil, errReaperDisabled
+ }
+
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ return backoff.RetryWithData(
+ r.retryLocked(ctx, sessionID, provider),
+ backoff.WithContext(r.backoff(), ctx),
+ )
+}
+
+// retryLocked returns a function that can be used to create or reuse a reaper container.
+// The returned reaper is connected to the reaper container.
+// It must be called with the lock held.
+func (r *reaperSpawner) retryLocked(ctx context.Context, sessionID string, provider ReaperProvider) func() (*Reaper, error) {
+ return func() (reaper *Reaper, err error) { //nolint:nonamedreturns // Needed for deferred error check.
+ reaper, err = r.reuseOrCreate(ctx, sessionID, provider)
+ // Ensure that the reaper is terminated if an error occurred.
+ defer func() {
+ if err != nil {
+ if reaper != nil {
+ err = errors.Join(err, TerminateContainer(reaper.container))
+ }
+ err = r.retryError(errors.Join(err, r.cleanupLocked()))
}
- } else if state.Running {
- return reaperInstance, nil
- }
- // else: the reaper instance has been terminated, so we need to create a new one
- reaperOnce = sync.Once{}
- }
-
- // 2. because the reaper instance has not been created yet, look for it in the Docker daemon, which
- // will happen if the reaper container has been created in the same test session but in a different
- // test process execution (e.g. when running tests in parallel), not having initialized the reaper
- // instance yet.
- reaperContainer, err := lookUpReaperContainer(context.Background(), sessionID)
- if err == nil && reaperContainer != nil {
- // The reaper container exists as a Docker container: re-use it
- Logger.Printf("🔥 Reaper obtained from Docker for this test session %s", reaperContainer.ID)
- reaperInstance, err = reuseReaperContainer(ctx, sessionID, provider, reaperContainer)
+ }()
if err != nil {
return nil, err
}
- return reaperInstance, nil
- }
+ if err = r.isRunning(ctx, reaper.container); err != nil {
+ return nil, err
+ }
- // 3. the reaper container does not exist in the Docker daemon: create it, and do it using the
- // synchronization primitive to avoid multiple executions of this function to create the reaper
- var reaperErr error
- reaperOnce.Do(func() {
- r, err := newReaper(ctx, sessionID, provider)
+ // Check we can still connect.
+ termSignal, err := reaper.connect(ctx)
if err != nil {
- reaperErr = err
- return
+ return nil, fmt.Errorf("connect: %w", err)
}
- reaperInstance, reaperErr = r, nil
- })
- if reaperErr != nil {
- reaperOnce = sync.Once{}
- return nil, reaperErr
- }
+ reaper.setOrSignal(termSignal)
+
+ r.instance = reaper
- return reaperInstance, nil
+ return reaper, nil
+ }
}
-// reuseReaperContainer constructs a Reaper from an already running reaper
-// DockerContainer.
-func reuseReaperContainer(ctx context.Context, sessionID string, provider ReaperProvider, reaperContainer *DockerContainer) (*Reaper, error) {
- endpoint, err := reaperContainer.PortEndpoint(ctx, "8080", "")
+// reuseOrCreate returns an existing Reaper instance if it exists, otherwise a new Reaper instance.
+func (r *reaperSpawner) reuseOrCreate(ctx context.Context, sessionID string, provider ReaperProvider) (*Reaper, error) {
+ if r.instance != nil {
+ // We already have an associated reaper.
+ return r.instance, nil
+ }
+
+ // Look for an existing reaper created in the same test session but in a
+ // different test process execution e.g. when running tests in parallel.
+ container, err := r.lookupContainer(context.Background(), sessionID)
if err != nil {
- return nil, err
+ if !errors.Is(err, errReaperNotFound) {
+ return nil, fmt.Errorf("look up container: %w", err)
+ }
+
+ // The reaper container was not found, continue to create a new one.
+ reaper, err := r.newReaper(ctx, sessionID, provider)
+ if err != nil {
+ return nil, fmt.Errorf("new reaper: %w", err)
+ }
+
+ return reaper, nil
}
- Logger.Printf("⏳ Waiting for Reaper port to be ready")
+ // A reaper container exists, re-use it.
+ reaper, err := r.fromContainer(ctx, sessionID, provider, container)
+ if err != nil {
+ return nil, fmt.Errorf("from container %q: %w", container.ID[:8], err)
+ }
+
+ return reaper, nil
+}
- var containerJson *types.ContainerJSON
+// fromContainer constructs a Reaper from an already running reaper DockerContainer.
+func (r *reaperSpawner) fromContainer(ctx context.Context, sessionID string, provider ReaperProvider, dockerContainer *DockerContainer) (*Reaper, error) {
+ Logger.Printf("⏳ Waiting for Reaper %q to be ready", dockerContainer.ID[:8])
- if containerJson, err = reaperContainer.Inspect(ctx); err != nil {
- return nil, fmt.Errorf("failed to inspect reaper container %s: %w", reaperContainer.ID[:8], err)
+ // Reusing an existing container so we determine the port from the container's exposed ports.
+ if err := wait.ForExposedPort().
+ WithPollInterval(100*time.Millisecond).
+ SkipInternalCheck().
+ WaitUntilReady(ctx, dockerContainer); err != nil {
+ return nil, fmt.Errorf("wait for reaper %s: %w", dockerContainer.ID[:8], err)
}
- if containerJson != nil && containerJson.NetworkSettings != nil {
- for port := range containerJson.NetworkSettings.Ports {
- err := wait.ForListeningPort(port).
- WithPollInterval(100*time.Millisecond).
- WaitUntilReady(ctx, reaperContainer)
- if err != nil {
- return nil, fmt.Errorf("failed waiting for reaper container %s port %s/%s to be ready: %w",
- reaperContainer.ID[:8], port.Proto(), port.Port(), err)
- }
- }
+ endpoint, err := dockerContainer.Endpoint(ctx, "")
+ if err != nil {
+ return nil, fmt.Errorf("port endpoint: %w", err)
}
+ Logger.Printf("🔥 Reaper obtained from Docker for this test session %s", dockerContainer.ID[:8])
+
return &Reaper{
Provider: provider,
SessionID: sessionID,
Endpoint: endpoint,
- container: reaperContainer,
+ container: dockerContainer,
}, nil
}
-// newReaper creates a Reaper with a sessionID to identify containers and a
-// provider to use. Do not call this directly, use reuseOrCreateReaper instead.
-func newReaper(ctx context.Context, sessionID string, provider ReaperProvider) (*Reaper, error) {
+// newReaper creates a connected Reaper with a sessionID to identify containers
+// and a provider to use.
+func (r *reaperSpawner) newReaper(ctx context.Context, sessionID string, provider ReaperProvider) (reaper *Reaper, err error) { //nolint:nonamedreturns // Needed for deferred error check.
dockerHostMount := core.MustExtractDockerSocket(ctx)
- reaper := &Reaper{
- Provider: provider,
- SessionID: sessionID,
- }
-
- listeningPort := nat.Port("8080/tcp")
-
+ port := r.port()
tcConfig := provider.Config().Config
-
req := ContainerRequest{
Image: config.ReaperDefaultImage,
- ExposedPorts: []string{string(listeningPort)},
+ ExposedPorts: []string{string(port)},
Labels: core.DefaultLabels(sessionID),
Privileged: tcConfig.RyukPrivileged,
- WaitingFor: wait.ForListeningPort(listeningPort),
+ WaitingFor: wait.ForListeningPort(port),
Name: reaperContainerNameFromSessionID(sessionID),
HostConfigModifier: func(hc *container.HostConfig) {
hc.AutoRemove = true
@@ -268,128 +395,164 @@ func newReaper(ctx context.Context, sessionID string, provider ReaperProvider) (
req.Env["RYUK_VERBOSE"] = "true"
}
- // include reaper-specific labels to the reaper container
+ // Setup reaper-specific labels for the reaper container.
req.Labels[core.LabelReaper] = "true"
req.Labels[core.LabelRyuk] = "true"
+ delete(req.Labels, core.LabelReap)
c, err := provider.RunContainer(ctx, req)
- if err != nil {
- // We need to check whether the error is caused by a container with the same name
- // already existing due to race conditions. We manually match the error message
- // as we do not have any error types to check against.
- if createContainerFailDueToNameConflictRegex.MatchString(err.Error()) {
- // Manually retrieve the already running reaper container. However, we need to
- // use retries here as there are two possible race conditions that might lead to
- // errors: In most cases, there is a small delay between container creation and
- // actually being visible in list-requests. This means that creation might fail
- // due to name conflicts, but when we list containers with this name, we do not
- // get any results. In another case, the container might have simply died in the
- // meantime and therefore cannot be found.
- const timeout = 5 * time.Second
- const cooldown = 100 * time.Millisecond
- start := time.Now()
- var reaperContainer *DockerContainer
- for time.Since(start) < timeout {
- reaperContainer, err = lookUpReaperContainer(ctx, sessionID)
- if err == nil && reaperContainer != nil {
- break
- }
- select {
- case <-ctx.Done():
- case <-time.After(cooldown):
- }
- }
- if err != nil {
- return nil, fmt.Errorf("look up reaper container due to name conflict failed: %w", err)
- }
- // If the reaper container was not found, it is most likely to have died in
- // between as we can exclude any client errors because of the previous error
- // check. Because the reaper should only die if it performed clean-ups, we can
- // fail here as the reaper timeout needs to be increased, anyway.
- if reaperContainer == nil {
- return nil, fmt.Errorf("look up reaper container returned nil although creation failed due to name conflict")
- }
- Logger.Printf("🔥 Reaper obtained from Docker for this test session %s", reaperContainer.ID)
- reaper, err := reuseReaperContainer(ctx, sessionID, provider, reaperContainer)
- if err != nil {
- return nil, err
- }
- return reaper, nil
+ defer func() {
+ if err != nil {
+ err = errors.Join(err, TerminateContainer(c))
}
- return nil, err
+ }()
+ if err != nil {
+ return nil, fmt.Errorf("run container: %w", err)
}
- reaper.container = c
- endpoint, err := c.PortEndpoint(ctx, "8080", "")
+ endpoint, err := c.PortEndpoint(ctx, port, "")
if err != nil {
- return nil, err
+ return nil, fmt.Errorf("port endpoint: %w", err)
}
- reaper.Endpoint = endpoint
- return reaper, nil
+ return &Reaper{
+ Provider: provider,
+ SessionID: sessionID,
+ Endpoint: endpoint,
+ container: c,
+ }, nil
}
// Reaper is used to start a sidecar container that cleans up resources
type Reaper struct {
- Provider ReaperProvider
- SessionID string
- Endpoint string
- container Container
+ Provider ReaperProvider
+ SessionID string
+ Endpoint string
+ container Container
+ mtx sync.Mutex // Protects termSignal.
+ termSignal chan bool
}
-// Connect runs a goroutine which can be terminated by sending true into the returned channel
+// Connect connects to the reaper container and sends the labels to it
+// so that it can clean up the containers with the same labels.
+//
+// It returns a channel that can be closed to terminate the connection.
+// Returns an error if config.RyukDisabled is true.
func (r *Reaper) Connect() (chan bool, error) {
- conn, err := net.DialTimeout("tcp", r.Endpoint, 10*time.Second)
- if err != nil {
- return nil, fmt.Errorf("%w: Connecting to Ryuk on %s failed", err, r.Endpoint)
+ if config.Read().RyukDisabled {
+ return nil, errReaperDisabled
}
- terminationSignal := make(chan bool)
- go func(conn net.Conn) {
- sock := bufio.NewReadWriter(bufio.NewReader(conn), bufio.NewWriter(conn))
- defer conn.Close()
+ if termSignal := r.useTermSignal(); termSignal != nil {
+ return termSignal, nil
+ }
- labelFilters := []string{}
- for l, v := range core.DefaultLabels(r.SessionID) {
- labelFilters = append(labelFilters, fmt.Sprintf("label=%s=%s", l, v))
- }
+ ctx, cancel := context.WithTimeout(context.Background(), time.Second*5)
+ defer cancel()
- retryLimit := 3
- for retryLimit > 0 {
- retryLimit--
+ return r.connect(ctx)
+}
- if _, err := sock.WriteString(strings.Join(labelFilters, "&")); err != nil {
- continue
- }
+// close signals the connection to close if needed.
+// Safe for concurrent calls.
+func (r *Reaper) close() {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
- if _, err := sock.WriteString("\n"); err != nil {
- continue
- }
+ if r.termSignal != nil {
+ r.termSignal <- true
+ r.termSignal = nil
+ }
+}
- if err := sock.Flush(); err != nil {
- continue
- }
+// setOrSignal sets the reaper's termSignal field if nil
+// otherwise consumes by sending true to it.
+// Safe for concurrent calls.
+func (r *Reaper) setOrSignal(termSignal chan bool) {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.termSignal != nil {
+ // Already have an existing connection, close the new one.
+ termSignal <- true
+ return
+ }
- resp, err := sock.ReadString('\n')
- if err != nil {
- continue
- }
+ // First or new unused termSignal, assign for caller to reuse.
+ r.termSignal = termSignal
+}
- if resp == "ACK\n" {
- break
- }
- }
+// useTermSignal if termSignal is not nil returns it
+// and sets it to nil, otherwise returns nil.
+//
+// Safe for concurrent calls.
+func (r *Reaper) useTermSignal() chan bool {
+ r.mtx.Lock()
+ defer r.mtx.Unlock()
+
+ if r.termSignal == nil {
+ return nil
+ }
+
+ // Use existing connection.
+ term := r.termSignal
+ r.termSignal = nil
+
+ return term
+}
+// connect connects to the reaper container and sends the labels to it
+// so that it can clean up the containers with the same labels.
+//
+// It returns a channel that can be sent true to terminate the connection.
+// Returns an error if config.RyukDisabled is true.
+func (r *Reaper) connect(ctx context.Context) (chan bool, error) {
+ var d net.Dialer
+ conn, err := d.DialContext(ctx, "tcp", r.Endpoint)
+ if err != nil {
+ return nil, fmt.Errorf("dial reaper %s: %w", r.Endpoint, err)
+ }
+
+ terminationSignal := make(chan bool)
+ go func() {
+ defer conn.Close()
+ if err := r.handshake(conn); err != nil {
+ Logger.Printf("Reaper handshake failed: %s", err)
+ }
<-terminationSignal
- }(conn)
+ }()
return terminationSignal, nil
}
+// handshake sends the labels to the reaper container and reads the ACK.
+func (r *Reaper) handshake(conn net.Conn) error {
+ labels := core.DefaultLabels(r.SessionID)
+ labelFilters := make([]string, 0, len(labels))
+ for l, v := range labels {
+ labelFilters = append(labelFilters, fmt.Sprintf("label=%s=%s", l, v))
+ }
+
+ filters := []byte(strings.Join(labelFilters, "&") + "\n")
+ buf := make([]byte, 4)
+ if _, err := conn.Write(filters); err != nil {
+ return fmt.Errorf("writing filters: %w", err)
+ }
+
+ n, err := io.ReadFull(conn, buf)
+ if err != nil {
+ return fmt.Errorf("read ack: %w", err)
+ }
+
+ if !bytes.Equal(reaperAck, buf[:n]) {
+ // Response is not the expected ACK.
+ return fmt.Errorf("unexpected reaper response: %s", buf[:n])
+ }
+
+ return nil
+}
+
// Labels returns the container labels to use so that this Reaper cleans them up
// Deprecated: internally replaced by core.DefaultLabels(sessionID)
func (r *Reaper) Labels() map[string]string {
- return map[string]string{
- core.LabelLang: "go",
- core.LabelSessionID: r.SessionID,
- }
+ return GenericLabels()
}
diff --git a/reaper_test.go b/reaper_test.go
index cd0e1ee7d8..53c7284dc3 100644
--- a/reaper_test.go
+++ b/reaper_test.go
@@ -4,14 +4,15 @@ import (
"context"
"errors"
"os"
+ "strconv"
"sync"
"testing"
"time"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/network"
+ "github.com/docker/docker/errdefs"
"github.com/docker/go-connections/nat"
- "github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
"github.com/testcontainers/testcontainers-go/internal/config"
@@ -23,48 +24,29 @@ import (
const testSessionID = "this-is-a-different-session-id"
type mockReaperProvider struct {
- req ContainerRequest
- hostConfig *container.HostConfig
- enpointSettings map[string]*network.EndpointSettings
- config TestcontainersConfig
- initialReaper *Reaper
- initialReaperOnce sync.Once
- t *testing.T
+ req ContainerRequest
+ hostConfig *container.HostConfig
+ endpointSettings map[string]*network.EndpointSettings
+ config TestcontainersConfig
}
-func newMockReaperProvider(t *testing.T) *mockReaperProvider {
+func newMockReaperProvider(cfg config.Config) *mockReaperProvider {
m := &mockReaperProvider{
config: TestcontainersConfig{
- Config: config.Config{},
+ Config: cfg,
},
- t: t,
- initialReaper: reaperInstance,
- //nolint:govet
- initialReaperOnce: reaperOnce,
}
- // explicitly reset the reaperInstance to nil to start from a fresh state
- reaperInstance = nil
- reaperOnce = sync.Once{}
-
return m
}
var errExpected = errors.New("expected")
-func (m *mockReaperProvider) RestoreReaperState() {
- m.t.Cleanup(func() {
- reaperInstance = m.initialReaper
- //nolint:govet
- reaperOnce = m.initialReaperOnce
- })
-}
-
func (m *mockReaperProvider) RunContainer(ctx context.Context, req ContainerRequest) (Container, error) {
m.req = req
m.hostConfig = &container.HostConfig{}
- m.enpointSettings = map[string]*network.EndpointSettings{}
+ m.endpointSettings = map[string]*network.EndpointSettings{}
if req.HostConfigModifier == nil {
req.HostConfigModifier = defaultHostConfigModifier(req)
@@ -72,7 +54,7 @@ func (m *mockReaperProvider) RunContainer(ctx context.Context, req ContainerRequ
req.HostConfigModifier(m.hostConfig)
if req.EnpointSettingsModifier != nil {
- req.EnpointSettingsModifier(m.enpointSettings)
+ req.EnpointSettingsModifier(m.endpointSettings)
}
// we're only interested in the request, so instead of mocking the Docker client
@@ -84,8 +66,8 @@ func (m *mockReaperProvider) Config() TestcontainersConfig {
return m.config
}
-// createContainerRequest creates the expected request and allows for customization
-func createContainerRequest(customize func(ContainerRequest) ContainerRequest) ContainerRequest {
+// expectedReaperRequest creates the expected reaper container request with the given customizations.
+func expectedReaperRequest(customize ...func(*ContainerRequest)) ContainerRequest {
req := ContainerRequest{
Image: config.ReaperDefaultImage,
ExposedPorts: []string{"8080/tcp"},
@@ -102,21 +84,26 @@ func createContainerRequest(customize func(ContainerRequest) ContainerRequest) C
req.Labels[core.LabelReaper] = "true"
req.Labels[core.LabelRyuk] = "true"
+ delete(req.Labels, core.LabelReap)
- if customize == nil {
- return req
+ for _, customize := range customize {
+ customize(&req)
}
- return customize(req)
+ return req
}
-func TestContainerStartsWithoutTheReaper(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if !tcConfig.RyukDisabled {
- t.Skip("Ryuk is enabled, skipping test")
- }
+// reaperDisable disables / enables the reaper for the duration of the test.
+func reaperDisable(t *testing.T, disabled bool) {
+ t.Helper()
+
+ config.Reset()
+ t.Setenv("TESTCONTAINERS_RYUK_DISABLED", strconv.FormatBool(disabled))
+ t.Cleanup(config.Reset)
+}
+func testContainerStart(t *testing.T) {
+ t.Helper()
ctx := context.Background()
ctr, err := GenericContainer(ctx, GenericContainerRequest{
@@ -130,58 +117,55 @@ func TestContainerStartsWithoutTheReaper(t *testing.T) {
})
CleanupContainer(t, ctr)
require.NoError(t, err)
+}
- sessionID := core.SessionID()
+// testReaperRunning validates that a reaper is running.
+func testReaperRunning(t *testing.T) {
+ t.Helper()
- reaperContainer, err := lookUpReaperContainer(ctx, sessionID)
- if err != nil {
- t.Fatal(err, "expected reaper container not found.")
- }
- if reaperContainer != nil {
- t.Fatal("expected zero reaper running.")
- }
+ ctx := context.Background()
+ sessionID := core.SessionID()
+ reaperContainer, err := spawner.lookupContainer(ctx, sessionID)
+ require.NoError(t, err)
+ require.NotNil(t, reaperContainer)
}
-func TestContainerStartsWithTheReaper(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if tcConfig.RyukDisabled {
- t.Skip("Ryuk is disabled, skipping test")
- }
+func TestContainer(t *testing.T) {
+ reaperDisable(t, false)
- ctx := context.Background()
+ t.Run("start/reaper-enabled", func(t *testing.T) {
+ testContainerStart(t)
+ testReaperRunning(t)
+ })
- c, err := GenericContainer(ctx, GenericContainerRequest{
- ContainerRequest: ContainerRequest{
- Image: nginxAlpineImage,
- ExposedPorts: []string{
- nginxDefaultPort,
- },
- },
- Started: true,
+ t.Run("stop/reaper-enabled", func(t *testing.T) {
+ testContainerStop(t)
+ testReaperRunning(t)
})
- CleanupContainer(t, c)
- if err != nil {
- t.Fatal(err)
- }
- sessionID := core.SessionID()
+ t.Run("terminate/reaper-enabled", func(t *testing.T) {
+ testContainerTerminate(t)
+ testReaperRunning(t)
+ })
- reaperContainer, err := lookUpReaperContainer(ctx, sessionID)
- if err != nil {
- t.Fatal(err, "expected reaper container running.")
- }
- if reaperContainer == nil {
- t.Fatal("expected one reaper to be running.")
- }
+ reaperDisable(t, true)
+
+ t.Run("start/reaper-disabled", func(t *testing.T) {
+ testContainerStart(t)
+ })
+
+ t.Run("stop/reaper-disabled", func(t *testing.T) {
+ testContainerStop(t)
+ })
+
+ t.Run("terminate/reaper-disabled", func(t *testing.T) {
+ testContainerTerminate(t)
+ })
}
-func TestContainerStopWithReaper(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if tcConfig.RyukDisabled {
- t.Skip("Ryuk is disabled, skipping test")
- }
+// testContainerStop tests stopping a container.
+func testContainerStop(t *testing.T) {
+ t.Helper()
ctx := context.Background()
@@ -198,37 +182,21 @@ func TestContainerStopWithReaper(t *testing.T) {
require.NoError(t, err)
state, err := nginxA.State(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if state.Running != true {
- t.Fatal("The container shoud be in running state")
- }
+ require.NoError(t, err)
+ require.True(t, state.Running)
+
stopTimeout := 10 * time.Second
err = nginxA.Stop(ctx, &stopTimeout)
- if err != nil {
- t.Fatal(err)
- }
+ require.NoError(t, err)
state, err = nginxA.State(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if state.Running != false {
- t.Fatal("The container shoud not be running")
- }
- if state.Status != "exited" {
- t.Fatal("The container shoud be in exited state")
- }
+ require.NoError(t, err)
+ require.False(t, state.Running)
+ require.Equal(t, "exited", state.Status)
}
-func TestContainerTerminationWithReaper(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if tcConfig.RyukDisabled {
- t.Skip("Ryuk is disabled, skipping test")
- }
-
+// testContainerTerminate tests terminating a container.
+func testContainerTerminate(t *testing.T) {
ctx := context.Background()
nginxA, err := GenericContainer(ctx, GenericContainerRequest{
@@ -254,323 +222,274 @@ func TestContainerTerminationWithReaper(t *testing.T) {
require.Error(t, err)
}
-func TestContainerTerminationWithoutReaper(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if !tcConfig.RyukDisabled {
- t.Skip("Ryuk is enabled, skipping test")
- }
+func Test_NewReaper(t *testing.T) {
+ reaperDisable(t, false)
ctx := context.Background()
- nginxA, err := GenericContainer(ctx, GenericContainerRequest{
- ContainerRequest: ContainerRequest{
- Image: nginxAlpineImage,
- ExposedPorts: []string{
- nginxDefaultPort,
+ t.Run("non-privileged", func(t *testing.T) {
+ testNewReaper(ctx, t,
+ config.Config{
+ RyukConnectionTimeout: time.Minute,
+ RyukReconnectionTimeout: 10 * time.Second,
},
- },
- Started: true,
+ expectedReaperRequest(),
+ )
})
- CleanupContainer(t, nginxA)
- if err != nil {
- t.Fatal(err)
- }
- state, err := nginxA.State(ctx)
- if err != nil {
- t.Fatal(err)
- }
- if state.Running != true {
- t.Fatal("The container shoud be in running state")
- }
- err = nginxA.Terminate(ctx)
- if err != nil {
- t.Fatal(err)
- }
-
- _, err = nginxA.State(ctx)
- if err == nil {
- t.Fatal("expected error from container inspect.")
- }
-}
-
-func Test_NewReaper(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if tcConfig.RyukDisabled {
- t.Skip("Ryuk is disabled, skipping test")
- }
-
- type cases struct {
- name string
- req ContainerRequest
- config TestcontainersConfig
- ctx context.Context
- env map[string]string
- }
-
- tests := []cases{
- {
- name: "non-privileged",
- req: createContainerRequest(nil),
- config: TestcontainersConfig{Config: config.Config{
- RyukConnectionTimeout: time.Minute,
- RyukReconnectionTimeout: 10 * time.Second,
- }},
- },
- {
- name: "privileged",
- req: createContainerRequest(func(req ContainerRequest) ContainerRequest {
- req.Privileged = true
- return req
- }),
- config: TestcontainersConfig{Config: config.Config{
+ t.Run("privileged", func(t *testing.T) {
+ testNewReaper(ctx, t,
+ config.Config{
RyukPrivileged: true,
RyukConnectionTimeout: time.Minute,
RyukReconnectionTimeout: 10 * time.Second,
- }},
- },
- {
- name: "configured non-default timeouts",
- req: createContainerRequest(func(req ContainerRequest) ContainerRequest {
- req.Env = map[string]string{
- "RYUK_CONNECTION_TIMEOUT": "1m0s",
- "RYUK_RECONNECTION_TIMEOUT": "10m0s",
- }
- return req
- }),
- config: TestcontainersConfig{Config: config.Config{
+ },
+ expectedReaperRequest(),
+ )
+ })
+
+ t.Run("custom-timeouts", func(t *testing.T) {
+ testNewReaper(ctx, t,
+ config.Config{
RyukPrivileged: true,
- RyukConnectionTimeout: time.Minute,
- RyukReconnectionTimeout: 10 * time.Minute,
- }},
- },
- {
- name: "configured verbose mode",
- req: createContainerRequest(func(req ContainerRequest) ContainerRequest {
+ RyukConnectionTimeout: 2 * time.Minute,
+ RyukReconnectionTimeout: 20 * time.Second,
+ },
+ expectedReaperRequest(func(req *ContainerRequest) {
req.Env = map[string]string{
- "RYUK_VERBOSE": "true",
+ "RYUK_CONNECTION_TIMEOUT": "2m0s",
+ "RYUK_RECONNECTION_TIMEOUT": "20s",
}
- return req
}),
- config: TestcontainersConfig{Config: config.Config{
+ )
+ })
+
+ t.Run("verbose", func(t *testing.T) {
+ testNewReaper(ctx, t,
+ config.Config{
RyukPrivileged: true,
RyukVerbose: true,
- }},
- },
- {
- name: "docker-host in context",
- req: createContainerRequest(func(req ContainerRequest) ContainerRequest {
- req.HostConfigModifier = func(hostConfig *container.HostConfig) {
- hostConfig.Binds = []string{core.MustExtractDockerSocket(context.Background()) + ":/var/run/docker.sock"}
+ },
+ expectedReaperRequest(func(req *ContainerRequest) {
+ req.Env = map[string]string{
+ "RYUK_VERBOSE": "true",
}
- return req
}),
- config: TestcontainersConfig{Config: config.Config{
+ )
+ })
+
+ t.Run("docker-host", func(t *testing.T) {
+ testNewReaper(context.WithValue(ctx, core.DockerHostContextKey, core.DockerSocketPathWithSchema), t,
+ config.Config{
RyukConnectionTimeout: time.Minute,
RyukReconnectionTimeout: 10 * time.Second,
- }},
- ctx: context.WithValue(context.TODO(), core.DockerHostContextKey, core.DockerSocketPathWithSchema),
- },
- {
- name: "Reaper including custom Hub prefix",
- req: createContainerRequest(func(req ContainerRequest) ContainerRequest {
- req.Image = config.ReaperDefaultImage
- req.Privileged = true
- return req
+ },
+ expectedReaperRequest(func(req *ContainerRequest) {
+ req.HostConfigModifier = func(hostConfig *container.HostConfig) {
+ hostConfig.Binds = []string{core.MustExtractDockerSocket(ctx) + ":/var/run/docker.sock"}
+ }
}),
- config: TestcontainersConfig{Config: config.Config{
+ )
+ })
+
+ t.Run("hub-prefix", func(t *testing.T) {
+ testNewReaper(context.WithValue(ctx, core.DockerHostContextKey, core.DockerSocketPathWithSchema), t,
+ config.Config{
HubImageNamePrefix: "registry.mycompany.com/mirror",
RyukPrivileged: true,
RyukConnectionTimeout: time.Minute,
RyukReconnectionTimeout: 10 * time.Second,
- }},
- },
- {
- name: "Reaper including custom Hub prefix as env var",
- req: createContainerRequest(func(req ContainerRequest) ContainerRequest {
+ },
+ expectedReaperRequest(func(req *ContainerRequest) {
req.Image = config.ReaperDefaultImage
req.Privileged = true
- return req
}),
- config: TestcontainersConfig{Config: config.Config{
+ )
+ })
+
+ t.Run("hub-prefix-env", func(t *testing.T) {
+ config.Reset()
+ t.Cleanup(config.Reset)
+
+ t.Setenv("TESTCONTAINERS_HUB_IMAGE_NAME_PREFIX", "registry.mycompany.com/mirror")
+ testNewReaper(context.WithValue(ctx, core.DockerHostContextKey, core.DockerSocketPathWithSchema), t,
+ config.Config{
RyukPrivileged: true,
RyukConnectionTimeout: time.Minute,
RyukReconnectionTimeout: 10 * time.Second,
- }},
- env: map[string]string{
- "TESTCONTAINERS_HUB_IMAGE_NAME_PREFIX": "registry.mycompany.com/mirror",
},
- },
- }
-
- for _, test := range tests {
- t.Run(test.name, func(t *testing.T) {
- if test.env != nil {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- for k, v := range test.env {
- t.Setenv(k, v)
- }
- }
+ expectedReaperRequest(func(req *ContainerRequest) {
+ req.Image = config.ReaperDefaultImage
+ req.Privileged = true
+ }),
+ )
+ })
+}
- if prefix := os.Getenv("TESTCONTAINERS_HUB_IMAGE_NAME_PREFIX"); prefix != "" {
- test.config.Config.HubImageNamePrefix = prefix
- }
+func testNewReaper(ctx context.Context, t *testing.T, cfg config.Config, expected ContainerRequest) {
+ t.Helper()
- provider := newMockReaperProvider(t)
- provider.config = test.config
- t.Cleanup(provider.RestoreReaperState)
+ if prefix := os.Getenv("TESTCONTAINERS_HUB_IMAGE_NAME_PREFIX"); prefix != "" {
+ cfg.HubImageNamePrefix = prefix
+ }
- if test.ctx == nil {
- test.ctx = context.TODO()
- }
+ provider := newMockReaperProvider(cfg)
- _, err := reuseOrCreateReaper(test.ctx, testSessionID, provider)
- // we should have errored out see mockReaperProvider.RunContainer
- require.EqualError(t, err, "expected")
+ // We need a new reaperSpawner for each test case to avoid reusing
+ // an existing reaper instance.
+ spawner := &reaperSpawner{}
+ reaper, err := spawner.reaper(ctx, testSessionID, provider)
+ cleanupReaper(t, reaper, spawner)
+	// We should have errored out; see mockReaperProvider.RunContainer.
+ require.ErrorIs(t, err, errExpected)
- assert.Equal(t, test.req.Image, provider.req.Image, "expected image doesn't match the submitted request")
- assert.Equal(t, test.req.ExposedPorts, provider.req.ExposedPorts, "expected exposed ports don't match the submitted request")
- assert.Equal(t, test.req.Labels, provider.req.Labels, "expected labels don't match the submitted request")
- assert.Equal(t, test.req.Mounts, provider.req.Mounts, "expected mounts don't match the submitted request")
- assert.Equal(t, test.req.WaitingFor, provider.req.WaitingFor, "expected waitingFor don't match the submitted request")
- assert.Equal(t, test.req.Env, provider.req.Env, "expected env doesn't match the submitted request")
+ require.Equal(t, expected.Image, provider.req.Image, "expected image doesn't match the submitted request")
+ require.Equal(t, expected.ExposedPorts, provider.req.ExposedPorts, "expected exposed ports don't match the submitted request")
+ require.Equal(t, expected.Labels, provider.req.Labels, "expected labels don't match the submitted request")
+ require.Equal(t, expected.Mounts, provider.req.Mounts, "expected mounts don't match the submitted request")
+ require.Equal(t, expected.WaitingFor, provider.req.WaitingFor, "expected waitingFor don't match the submitted request")
+ require.Equal(t, expected.Env, provider.req.Env, "expected env doesn't match the submitted request")
- // checks for reaper's preCreationCallback fields
- assert.Equal(t, container.NetworkMode("bridge"), provider.hostConfig.NetworkMode, "expected networkMode doesn't match the submitted request")
- assert.True(t, provider.hostConfig.AutoRemove, "expected networkMode doesn't match the submitted request")
- })
- }
+ // checks for reaper's preCreationCallback fields
+ require.Equal(t, container.NetworkMode(Bridge), provider.hostConfig.NetworkMode, "expected networkMode doesn't match the submitted request")
+	require.True(t, provider.hostConfig.AutoRemove, "expected autoRemove doesn't match the submitted request")
}
func Test_ReaperReusedIfHealthy(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if tcConfig.RyukDisabled {
- t.Skip("Ryuk is disabled, skipping test")
- }
-
- testProvider := newMockReaperProvider(t)
- t.Cleanup(testProvider.RestoreReaperState)
+ reaperDisable(t, false)
SkipIfProviderIsNotHealthy(t)
ctx := context.Background()
// As other integration tests run with the (shared) Reaper as well, re-use the instance to not interrupt other tests
- wasReaperRunning := reaperInstance != nil
+ if spawner.instance != nil {
+ t.Cleanup(func() {
+ require.NoError(t, spawner.cleanup())
+ })
+ }
+
+ provider, err := ProviderDocker.GetProvider()
+ require.NoError(t, err)
- provider, _ := ProviderDocker.GetProvider()
- reaper, err := reuseOrCreateReaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ reaper, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ cleanupReaper(t, reaper, spawner)
require.NoError(t, err, "creating the Reaper should not error")
- reaperReused, err := reuseOrCreateReaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ reaperReused, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+	cleanupReaper(t, reaperReused, spawner)
require.NoError(t, err, "reusing the Reaper should not error")
- // assert that the internal state of both reaper instances is the same
- assert.Equal(t, reaper.SessionID, reaperReused.SessionID, "expecting the same SessionID")
- assert.Equal(t, reaper.Endpoint, reaperReused.Endpoint, "expecting the same reaper endpoint")
- assert.Equal(t, reaper.Provider, reaperReused.Provider, "expecting the same container provider")
- assert.Equal(t, reaper.container.GetContainerID(), reaperReused.container.GetContainerID(), "expecting the same container ID")
- assert.Equal(t, reaper.container.SessionID(), reaperReused.container.SessionID(), "expecting the same session ID")
-
- terminate, err := reaper.Connect()
- defer func(term chan bool) {
- term <- true
- }(terminate)
- require.NoError(t, err, "connecting to Reaper should be successful")
- if !wasReaperRunning {
- CleanupContainer(t, reaper.container)
- }
+ // Ensure the internal state of both reaper instances is the same
+ require.Equal(t, reaper.SessionID, reaperReused.SessionID, "expecting the same SessionID")
+ require.Equal(t, reaper.Endpoint, reaperReused.Endpoint, "expecting the same reaper endpoint")
+ require.Equal(t, reaper.Provider, reaperReused.Provider, "expecting the same container provider")
+ require.Equal(t, reaper.container.GetContainerID(), reaperReused.container.GetContainerID(), "expecting the same container ID")
+ require.Equal(t, reaper.container.SessionID(), reaperReused.container.SessionID(), "expecting the same session ID")
+
+ termSignal, err := reaper.Connect()
+ cleanupTermSignal(t, termSignal)
+ require.NoError(t, err, "connecting to Reaper should be successful")
}
func Test_RecreateReaperIfTerminated(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if tcConfig.RyukDisabled {
- t.Skip("Ryuk is disabled, skipping test")
- }
-
- mockProvider := newMockReaperProvider(t)
- t.Cleanup(mockProvider.RestoreReaperState)
+ reaperDisable(t, false)
SkipIfProviderIsNotHealthy(t)
- provider, _ := ProviderDocker.GetProvider()
+ provider, err := ProviderDocker.GetProvider()
+ require.NoError(t, err)
+
ctx := context.Background()
- reaper, err := reuseOrCreateReaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ reaper, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ cleanupReaper(t, reaper, spawner)
require.NoError(t, err, "creating the Reaper should not error")
- terminate, err := reaper.Connect()
- require.NoError(t, err, "connecting to Reaper should be successful")
- terminate <- true
+ termSignal, err := reaper.Connect()
+ if termSignal != nil {
+ termSignal <- true
+ }
+ require.NoError(t, err)
- // Wait for ryuk's default timeout (10s) + 1s to allow for a graceful shutdown/cleanup of the container.
- time.Sleep(11 * time.Second)
+ // Wait for up to ryuk's default reconnect timeout + 1s to allow for a graceful shutdown/cleanup of the container.
+ timeout := time.NewTimer(time.Second * 11)
+ t.Cleanup(func() {
+ timeout.Stop()
+ })
+ for {
+ state, err := reaper.container.State(ctx)
+ if err != nil {
+ if errdefs.IsNotFound(err) {
+ break
+ }
+ require.NoError(t, err)
+ }
+
+ if !state.Running {
+ break
+ }
+
+ select {
+ case <-timeout.C:
+ t.Fatal("reaper container should have been terminated")
+ default:
+ }
- recreatedReaper, err := reuseOrCreateReaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ time.Sleep(time.Millisecond * 100)
+ }
+
+ recreatedReaper, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ cleanupReaper(t, recreatedReaper, spawner)
require.NoError(t, err, "creating the Reaper should not error")
- assert.NotEqual(t, reaper.container.GetContainerID(), recreatedReaper.container.GetContainerID(), "expected different container ID")
+ require.NotEqual(t, reaper.container.GetContainerID(), recreatedReaper.container.GetContainerID(), "expected different container ID")
- terminate, err = recreatedReaper.Connect()
- defer func(term chan bool) {
- term <- true
- }(terminate)
+ recreatedTermSignal, err := recreatedReaper.Connect()
+ cleanupTermSignal(t, recreatedTermSignal)
require.NoError(t, err, "connecting to Reaper should be successful")
- CleanupContainer(t, recreatedReaper.container)
}
func TestReaper_reuseItFromOtherTestProgramUsingDocker(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if tcConfig.RyukDisabled {
- t.Skip("Ryuk is disabled, skipping test")
- }
+ reaperDisable(t, false)
- mockProvider := &mockReaperProvider{
- initialReaper: reaperInstance,
- //nolint:govet
- initialReaperOnce: reaperOnce,
- t: t,
- }
- t.Cleanup(mockProvider.RestoreReaperState)
-
- // explicitly set the reaperInstance to nil to simulate another test program in the same session accessing the same reaper
- reaperInstance = nil
- reaperOnce = sync.Once{}
+ // Explicitly set the reaper instance to nil to simulate another test
+ // program in the same session accessing the same reaper.
+ spawner.instance = nil
SkipIfProviderIsNotHealthy(t)
ctx := context.Background()
- // As other integration tests run with the (shared) Reaper as well, re-use the instance to not interrupt other tests
- wasReaperRunning := reaperInstance != nil
+ // As other integration tests run with the (shared) Reaper as well,
+ // re-use the instance to not interrupt other tests.
+ if spawner.instance != nil {
+ t.Cleanup(func() {
+ require.NoError(t, spawner.cleanup())
+ })
+ }
- provider, _ := ProviderDocker.GetProvider()
- reaper, err := reuseOrCreateReaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ provider, err := ProviderDocker.GetProvider()
+ require.NoError(t, err)
+
+ reaper, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ cleanupReaper(t, reaper, spawner)
require.NoError(t, err, "creating the Reaper should not error")
- // explicitly reset the reaperInstance to nil to simulate another test program in the same session accessing the same reaper
- reaperInstance = nil
- reaperOnce = sync.Once{}
+ // Explicitly reset the reaper instance to nil to simulate another test
+ // program in the same session accessing the same reaper.
+ spawner.instance = nil
- reaperReused, err := reuseOrCreateReaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+ reaperReused, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, provider.(*DockerProvider).host), testSessionID, provider)
+	cleanupReaper(t, reaperReused, spawner)
require.NoError(t, err, "reusing the Reaper should not error")
- // assert that the internal state of both reaper instances is the same
- assert.Equal(t, reaper.SessionID, reaperReused.SessionID, "expecting the same SessionID")
- assert.Equal(t, reaper.Endpoint, reaperReused.Endpoint, "expecting the same reaper endpoint")
- assert.Equal(t, reaper.Provider, reaperReused.Provider, "expecting the same container provider")
- assert.Equal(t, reaper.container.GetContainerID(), reaperReused.container.GetContainerID(), "expecting the same container ID")
- assert.Equal(t, reaper.container.SessionID(), reaperReused.container.SessionID(), "expecting the same session ID")
-
- terminate, err := reaper.Connect()
- defer func(term chan bool) {
- term <- true
- }(terminate)
- require.NoError(t, err, "connecting to Reaper should be successful")
- if !wasReaperRunning {
- CleanupContainer(t, reaper.container)
- }
+ // Ensure that the internal state of both reaper instances is the same.
+ require.Equal(t, reaper.SessionID, reaperReused.SessionID, "expecting the same SessionID")
+ require.Equal(t, reaper.Endpoint, reaperReused.Endpoint, "expecting the same reaper endpoint")
+ require.Equal(t, reaper.Provider, reaperReused.Provider, "expecting the same container provider")
+ require.Equal(t, reaper.container.GetContainerID(), reaperReused.container.GetContainerID(), "expecting the same container ID")
+ require.Equal(t, reaper.container.SessionID(), reaperReused.container.SessionID(), "expecting the same session ID")
+
+ termSignal, err := reaper.Connect()
+ cleanupTermSignal(t, termSignal)
+ require.NoError(t, err, "connecting to Reaper should be successful")
}
// TestReaper_ReuseRunning tests whether reusing the reaper if using
@@ -581,15 +500,11 @@ func TestReaper_reuseItFromOtherTestProgramUsingDocker(t *testing.T) {
// already running for the same session id by returning its container instance
// instead.
func TestReaper_ReuseRunning(t *testing.T) {
- config.Reset() // reset the config using the internal method to avoid the sync.Once
- tcConfig := config.Read()
- if tcConfig.RyukDisabled {
- t.Skip("Ryuk is disabled, skipping test")
- }
+ reaperDisable(t, false)
const concurrency = 64
- timeout, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
+ timeout, cancel := context.WithTimeout(context.Background(), time.Minute)
defer cancel()
sessionID := SessionID()
@@ -600,27 +515,54 @@ func TestReaper_ReuseRunning(t *testing.T) {
obtainedReaperContainerIDs := make([]string, concurrency)
var wg sync.WaitGroup
for i := 0; i < concurrency; i++ {
- i := i
wg.Add(1)
- go func() {
+ go func(i int) {
defer wg.Done()
- reaperContainer, err := lookUpReaperContainer(timeout, sessionID)
- if err == nil && reaperContainer != nil {
- // Found.
- obtainedReaperContainerIDs[i] = reaperContainer.GetContainerID()
- return
- }
- // Not found -> create.
- createdReaper, err := newReaper(timeout, sessionID, dockerProvider)
- require.NoError(t, err, "new reaper should not fail")
- obtainedReaperContainerIDs[i] = createdReaper.container.GetContainerID()
- }()
+ spawner := &reaperSpawner{}
+ reaper, err := spawner.reaper(timeout, sessionID, dockerProvider)
+ cleanupReaper(t, reaper, spawner)
+ require.NoError(t, err)
+
+ obtainedReaperContainerIDs[i] = reaper.container.GetContainerID()
+ }(i)
}
wg.Wait()
// Assure that all calls returned the same container.
firstContainerID := obtainedReaperContainerIDs[0]
for i, containerID := range obtainedReaperContainerIDs {
- assert.Equal(t, firstContainerID, containerID, "call %d should have returned same container id", i)
+ require.Equal(t, firstContainerID, containerID, "call %d should have returned same container id", i)
+ }
+}
+
+func TestSpawnerBackoff(t *testing.T) {
+ b := spawner.backoff()
+ for i := 0; i < 100; i++ {
+ require.LessOrEqual(t, b.NextBackOff(), time.Millisecond*250, "backoff should not exceed max interval")
+ }
+}
+
+// cleanupReaper schedules reaper for cleanup if it's not nil.
+func cleanupReaper(t *testing.T, reaper *Reaper, spawner *reaperSpawner) {
+ t.Helper()
+
+ if reaper == nil {
+ return
}
+
+ t.Cleanup(func() {
+ reaper.close()
+ require.NoError(t, spawner.cleanup())
+ })
+}
+
+// cleanupTermSignal schedules a test cleanup that sends the termination signal on termSignal, if it's not nil.
+func cleanupTermSignal(t *testing.T, termSignal chan bool) {
+ t.Helper()
+
+ t.Cleanup(func() {
+ if termSignal != nil {
+ termSignal <- true
+ }
+ })
}
diff --git a/sonar-project.properties b/sonar-project.properties
index 1b5cb92513..b0531a66c1 100644
--- a/sonar-project.properties
+++ b/sonar-project.properties
@@ -18,4 +18,4 @@ sonar.test.inclusions=**/*_test.go
sonar.test.exclusions=**/vendor/**
sonar.go.coverage.reportPaths=**/coverage.out
-sonar.go.tests.reportPaths=TEST-unit.xml,examples/nginx/TEST-unit.xml,examples/toxiproxy/TEST-unit.xml,modulegen/TEST-unit.xml,modules/artemis/TEST-unit.xml,modules/azurite/TEST-unit.xml,modules/cassandra/TEST-unit.xml,modules/chroma/TEST-unit.xml,modules/clickhouse/TEST-unit.xml,modules/cockroachdb/TEST-unit.xml,modules/compose/TEST-unit.xml,modules/consul/TEST-unit.xml,modules/couchbase/TEST-unit.xml,modules/databend/TEST-unit.xml,modules/dolt/TEST-unit.xml,modules/dynamodb/TEST-unit.xml,modules/elasticsearch/TEST-unit.xml,modules/etcd/TEST-unit.xml,modules/gcloud/TEST-unit.xml,modules/grafana-lgtm/TEST-unit.xml,modules/inbucket/TEST-unit.xml,modules/influxdb/TEST-unit.xml,modules/k3s/TEST-unit.xml,modules/k6/TEST-unit.xml,modules/kafka/TEST-unit.xml,modules/localstack/TEST-unit.xml,modules/mariadb/TEST-unit.xml,modules/milvus/TEST-unit.xml,modules/minio/TEST-unit.xml,modules/mockserver/TEST-unit.xml,modules/mongodb/TEST-unit.xml,modules/mssql/TEST-unit.xml,modules/mysql/TEST-unit.xml,modules/nats/TEST-unit.xml,modules/neo4j/TEST-unit.xml,modules/ollama/TEST-unit.xml,modules/openfga/TEST-unit.xml,modules/openldap/TEST-unit.xml,modules/opensearch/TEST-unit.xml,modules/postgres/TEST-unit.xml,modules/pulsar/TEST-unit.xml,modules/qdrant/TEST-unit.xml,modules/rabbitmq/TEST-unit.xml,modules/redis/TEST-unit.xml,modules/redpanda/TEST-unit.xml,modules/registry/TEST-unit.xml,modules/surrealdb/TEST-unit.xml,modules/valkey/TEST-unit.xml,modules/vault/TEST-unit.xml,modules/vearch/TEST-unit.xml,modules/weaviate/TEST-unit.xml
+sonar.go.tests.reportPaths=TEST-unit.xml,examples/nginx/TEST-unit.xml,examples/toxiproxy/TEST-unit.xml,modulegen/TEST-unit.xml,modules/artemis/TEST-unit.xml,modules/azurite/TEST-unit.xml,modules/cassandra/TEST-unit.xml,modules/chroma/TEST-unit.xml,modules/clickhouse/TEST-unit.xml,modules/cockroachdb/TEST-unit.xml,modules/compose/TEST-unit.xml,modules/consul/TEST-unit.xml,modules/couchbase/TEST-unit.xml,modules/databend/TEST-unit.xml,modules/dolt/TEST-unit.xml,modules/dynamodb/TEST-unit.xml,modules/elasticsearch/TEST-unit.xml,modules/etcd/TEST-unit.xml,modules/gcloud/TEST-unit.xml,modules/grafana-lgtm/TEST-unit.xml,modules/inbucket/TEST-unit.xml,modules/influxdb/TEST-unit.xml,modules/k3s/TEST-unit.xml,modules/k6/TEST-unit.xml,modules/kafka/TEST-unit.xml,modules/localstack/TEST-unit.xml,modules/mariadb/TEST-unit.xml,modules/milvus/TEST-unit.xml,modules/minio/TEST-unit.xml,modules/mockserver/TEST-unit.xml,modules/mongodb/TEST-unit.xml,modules/mssql/TEST-unit.xml,modules/mysql/TEST-unit.xml,modules/nats/TEST-unit.xml,modules/neo4j/TEST-unit.xml,modules/ollama/TEST-unit.xml,modules/openfga/TEST-unit.xml,modules/openldap/TEST-unit.xml,modules/opensearch/TEST-unit.xml,modules/postgres/TEST-unit.xml,modules/pulsar/TEST-unit.xml,modules/qdrant/TEST-unit.xml,modules/rabbitmq/TEST-unit.xml,modules/redis/TEST-unit.xml,modules/redpanda/TEST-unit.xml,modules/registry/TEST-unit.xml,modules/surrealdb/TEST-unit.xml,modules/valkey/TEST-unit.xml,modules/vault/TEST-unit.xml,modules/vearch/TEST-unit.xml,modules/weaviate/TEST-unit.xml,modules/yugabytedb/TEST-unit.xml
diff --git a/testing.go b/testing.go
index 41391519de..cafd4fe920 100644
--- a/testing.go
+++ b/testing.go
@@ -68,11 +68,11 @@ func (lc *StdoutLogConsumer) Accept(l Log) {
// container is stopped when the function ends.
//
// before any error check. If container is nil, its a no-op.
-func CleanupContainer(tb testing.TB, container Container, options ...TerminateOption) {
+func CleanupContainer(tb testing.TB, ctr Container, options ...TerminateOption) {
tb.Helper()
tb.Cleanup(func() {
- noErrorOrIgnored(tb, TerminateContainer(container, options...))
+ noErrorOrIgnored(tb, TerminateContainer(ctr, options...))
})
}