From 84fef936e742dc715bc8aac6e822a34bee7a85b1 Mon Sep 17 00:00:00 2001 From: "dependabot[bot]" <49699333+dependabot[bot]@users.noreply.github.com> Date: Mon, 13 Jan 2025 04:18:10 +0000 Subject: [PATCH] Bump github.com/testcontainers/testcontainers-go/modules/postgres Bumps [github.com/testcontainers/testcontainers-go/modules/postgres](https://github.com/testcontainers/testcontainers-go) from 0.34.0 to 0.35.0. - [Release notes](https://github.com/testcontainers/testcontainers-go/releases) - [Commits](https://github.com/testcontainers/testcontainers-go/compare/v0.34.0...v0.35.0) --- updated-dependencies: - dependency-name: github.com/testcontainers/testcontainers-go/modules/postgres dependency-type: direct:production update-type: version-update:semver-minor ... Signed-off-by: dependabot[bot] --- go.mod | 4 +- go.sum | 10 +- .../testcontainers-go/.golangci.yml | 6 +- .../testcontainers/testcontainers-go/Makefile | 7 + .../testcontainers-go/Pipfile.lock | 7 +- .../testcontainers-go/cleanup.go | 98 ++-- .../testcontainers-go/commons-test.mk | 3 +- .../testcontainers-go/container.go | 66 ++- .../testcontainers-go/docker.go | 419 +++++++++++------- .../testcontainers-go/docker_auth.go | 50 ++- .../testcontainers-go/exec/processor.go | 46 +- .../internal/config/config.go | 29 +- .../internal/core/bootstrap.go | 3 +- .../internal/core/docker_host.go | 7 +- .../internal/core/docker_rootless.go | 4 +- .../testcontainers-go/internal/version.go | 2 +- .../testcontainers-go/lifecycle.go | 161 ++++--- .../testcontainers-go/logger.go | 15 +- .../testcontainers-go/mkdocs.yml | 8 +- .../modules/postgres/postgres.go | 46 +- .../postgres/resources/customEntrypoint.sh | 25 ++ .../testcontainers-go/network.go | 15 +- .../testcontainers-go/options.go | 2 +- .../testcontainers-go/port_forwarding.go | 274 +++++++----- .../testcontainers-go/provider.go | 2 +- .../testcontainers-go/reaper.go | 30 +- .../testcontainers-go/requirements.txt | 4 +- .../sonar-project.properties | 2 +- .../testcontainers-go/testing.go | 28 +- .../testcontainers-go/wait/all.go | 4 +- .../testcontainers-go/wait/host_port.go | 2 +- .../testcontainers-go/wait/log.go | 111 ++++- .../testcontainers-go/wait/tls.go | 167 +++++++ .../testcontainers-go/wait/walk.go | 74 ++++ vendor/modules.txt | 4 +- 35 files changed, 1230 insertions(+), 505 deletions(-) create mode 100644 vendor/github.com/testcontainers/testcontainers-go/modules/postgres/resources/customEntrypoint.sh create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/tls.go create mode 100644 vendor/github.com/testcontainers/testcontainers-go/wait/walk.go diff --git a/go.mod b/go.mod index 9c94c289..4b85a798 100644 --- a/go.mod +++ b/go.mod @@ -10,8 +10,8 @@ require ( github.com/pashagolub/pgxmock/v4 v4.3.0 github.com/prometheus/client_golang v1.20.5 github.com/prometheus/client_model v0.6.1 - github.com/testcontainers/testcontainers-go v0.34.0 - github.com/testcontainers/testcontainers-go/modules/postgres v0.34.0 + github.com/testcontainers/testcontainers-go v0.35.0 + github.com/testcontainers/testcontainers-go/modules/postgres v0.35.0 github.com/tsenart/vegeta/v12 v12.12.0 ) diff --git a/go.sum b/go.sum index af08485b..26a57b2c 100644 --- a/go.sum +++ b/go.sum @@ -94,6 +94,8 @@ github.com/magiconair/properties v1.8.7 h1:IeQXZAiQcpL9mgcAe1Nu6cX9LLw6ExEHKjN0V github.com/magiconair/properties v1.8.7/go.mod h1:Dhd985XPs7jluiymwWYZ0G4Z61jb3vdS329zhj2hYo0= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/mdelapenya/tlscert v0.1.0 h1:YTpF579PYUX475eOL+6zyEO3ngLTOUWck78NBuJVXaM= +github.com/mdelapenya/tlscert v0.1.0/go.mod h1:wrbyM/DwbFCeCeqdPX/8c6hNOqQgbf0rUDErE1uD+64= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk= @@ -157,10 +159,10 @@ github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UV github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= -github.com/testcontainers/testcontainers-go v0.34.0 h1:5fbgF0vIN5u+nD3IWabQwRybuB4GY8G2HHgCkbMzMHo= -github.com/testcontainers/testcontainers-go v0.34.0/go.mod h1:6P/kMkQe8yqPHfPWNulFGdFHTD8HB2vLq/231xY2iPQ= -github.com/testcontainers/testcontainers-go/modules/postgres v0.34.0 h1:c51aBXT3v2HEBVarmaBnsKzvgZjC5amn0qsj8Naqi50= -github.com/testcontainers/testcontainers-go/modules/postgres v0.34.0/go.mod h1:EWP75ogLQU4M4L8U+20mFipjV4WIR9WtlMXSB6/wiuc= +github.com/testcontainers/testcontainers-go v0.35.0 h1:uADsZpTKFAtp8SLK+hMwSaa+X+JiERHtd4sQAFmXeMo= +github.com/testcontainers/testcontainers-go v0.35.0/go.mod h1:oEVBj5zrfJTrgjwONs1SsRbnBtH9OKl+IGl3UMcr2B4= +github.com/testcontainers/testcontainers-go/modules/postgres v0.35.0 h1:eEGx9kYzZb2cNhRbBrNOCL/YPOM7+RMJiy3bB+ie0/I= +github.com/testcontainers/testcontainers-go/modules/postgres v0.35.0/go.mod h1:hfH71Mia/WWLBgMD2YctYcMlfsbnT0hflweL1dy8Q4s= github.com/tklauser/go-sysconf v0.3.14 h1:g5vzr9iPFFz24v2KZXs/pvpvh8/V9Fw6vQK5ZZb78yU= github.com/tklauser/go-sysconf v0.3.14/go.mod h1:1ym4lWMLUOhuBOPGtRcJm7tEGX4SCYNEEEtghGG/8uY= github.com/tklauser/numcpus v0.9.0 h1:lmyCHtANi8aRUgkckBgoDk1nHCux3n2cgkJLXdQGPDo= diff --git a/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml b/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml index 26f8f8a3..d708f003 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml +++ b/vendor/github.com/testcontainers/testcontainers-go/.golangci.yml @@ -7,11 +7,15 @@ linters: - gofumpt - misspell - nolintlint - - nonamedreturns + - nakedret + - perfsprint - testifylint - thelper + - usestdlibvars linters-settings: + nakedret: + max-func-lines: 0 errorlint: # Check whether fmt.Errorf uses the %w verb for formatting errors. # See the https://github.com/polyfloyd/go-errorlint for caveats. 
diff --git a/vendor/github.com/testcontainers/testcontainers-go/Makefile b/vendor/github.com/testcontainers/testcontainers-go/Makefile index 7c8c5e36..9c5a968e 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/Makefile +++ b/vendor/github.com/testcontainers/testcontainers-go/Makefile @@ -1,5 +1,12 @@ include ./commons-test.mk +.PHONY: lint-all +lint-all: + $(MAKE) lint + $(MAKE) -C modulegen lint + $(MAKE) -C examples lint-examples + $(MAKE) -C modules lint-modules + .PHONY: test-all test-all: tools test-tools test-unit diff --git a/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock b/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock index 9a2f6d24..d08964ab 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock +++ b/vendor/github.com/testcontainers/testcontainers-go/Pipfile.lock @@ -178,11 +178,12 @@ }, "jinja2": { "hashes": [ - "sha256:4a3aee7acbbe7303aede8e9648d13b8bf88a429282aa6122a993f0ac800cb369", - "sha256:bc5dd2abb727a5319567b7a813e6a2e7318c39f4f487cfe6c89c6f9c7d25197d" + "sha256:8fefff8dc3034e27bb80d67c671eb8a9bc424c0ef4c0826edbff304cceff43bb", + "sha256:aba0f4dc9ed8013c424088f68a5c226f7d6097ed89b246d7749c2ec4175c6adb" ], + "index": "pypi", "markers": "python_version >= '3.7'", - "version": "==3.1.4" + "version": "==3.1.5" }, "markdown": { "hashes": [ diff --git a/vendor/github.com/testcontainers/testcontainers-go/cleanup.go b/vendor/github.com/testcontainers/testcontainers-go/cleanup.go index e2d52440..d676b42b 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/cleanup.go +++ b/vendor/github.com/testcontainers/testcontainers-go/cleanup.go @@ -8,20 +8,65 @@ import ( "time" ) -// terminateOptions is a type that holds the options for terminating a container. -type terminateOptions struct { - ctx context.Context - timeout *time.Duration - volumes []string +// TerminateOptions is a type that holds the options for terminating a container. +type TerminateOptions struct { + ctx context.Context + stopTimeout *time.Duration + volumes []string } // TerminateOption is a type that represents an option for terminating a container. -type TerminateOption func(*terminateOptions) +type TerminateOption func(*TerminateOptions) + +// NewTerminateOptions returns a fully initialised TerminateOptions. +// Defaults: StopTimeout: 10 seconds. +func NewTerminateOptions(ctx context.Context, opts ...TerminateOption) *TerminateOptions { + timeout := time.Second * 10 + options := &TerminateOptions{ + stopTimeout: &timeout, + ctx: ctx, + } + for _, opt := range opts { + opt(options) + } + return options +} + +// Context returns the context to use during a Terminate. +func (o *TerminateOptions) Context() context.Context { + return o.ctx +} + +// StopTimeout returns the stop timeout to use during a Terminate. +func (o *TerminateOptions) StopTimeout() *time.Duration { + return o.stopTimeout +} + +// Cleanup performs any clean up needed +func (o *TerminateOptions) Cleanup() error { + // TODO: simplify this when when perform the client refactor. + if len(o.volumes) == 0 { + return nil + } + client, err := NewDockerClientWithOpts(o.ctx) + if err != nil { + return fmt.Errorf("docker client: %w", err) + } + defer client.Close() + // Best effort to remove all volumes. + var errs []error + for _, volume := range o.volumes { + if errRemove := client.VolumeRemove(o.ctx, volume, true); errRemove != nil { + errs = append(errs, fmt.Errorf("volume remove %q: %w", volume, errRemove)) + } + } + return errors.Join(errs...) 
+} // StopContext returns a TerminateOption that sets the context. // Default: context.Background(). func StopContext(ctx context.Context) TerminateOption { - return func(c *terminateOptions) { + return func(c *TerminateOptions) { c.ctx = ctx } } @@ -29,8 +74,8 @@ func StopContext(ctx context.Context) TerminateOption { // StopTimeout returns a TerminateOption that sets the timeout. // Default: See [Container.Stop]. func StopTimeout(timeout time.Duration) TerminateOption { - return func(c *terminateOptions) { - c.timeout = &timeout + return func(c *TerminateOptions) { + c.stopTimeout = &timeout } } @@ -39,7 +84,7 @@ func StopTimeout(timeout time.Duration) TerminateOption { // which are not removed by default. // Default: nil. func RemoveVolumes(volumes ...string) TerminateOption { - return func(c *terminateOptions) { + return func(c *TerminateOptions) { c.volumes = volumes } } @@ -54,41 +99,12 @@ func TerminateContainer(container Container, options ...TerminateOption) error { return nil } - c := &terminateOptions{ - ctx: context.Background(), - } - - for _, opt := range options { - opt(c) - } - - // TODO: Add a timeout when terminate supports it. - err := container.Terminate(c.ctx) + err := container.Terminate(context.Background(), options...) if !isCleanupSafe(err) { return fmt.Errorf("terminate: %w", err) } - // Remove additional volumes if any. - if len(c.volumes) == 0 { - return nil - } - - client, err := NewDockerClientWithOpts(c.ctx) - if err != nil { - return fmt.Errorf("docker client: %w", err) - } - - defer client.Close() - - // Best effort to remove all volumes. - var errs []error - for _, volume := range c.volumes { - if errRemove := client.VolumeRemove(c.ctx, volume, true); errRemove != nil { - errs = append(errs, fmt.Errorf("volume remove %q: %w", volume, errRemove)) - } - } - - return errors.Join(errs...) + return nil } // isNil returns true if val is nil or an nil instance false otherwise. diff --git a/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk b/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk index d168ff5c..91ed6a12 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk +++ b/vendor/github.com/testcontainers/testcontainers-go/commons-test.mk @@ -47,7 +47,8 @@ test-%: $(GOBIN)/gotestsum -- \ -v \ -coverprofile=coverage.out \ - -timeout=30m + -timeout=30m \ + -race .PHONY: tools tools: diff --git a/vendor/github.com/testcontainers/testcontainers-go/container.go b/vendor/github.com/testcontainers/testcontainers-go/container.go index d114a598..50fc656e 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/container.go +++ b/vendor/github.com/testcontainers/testcontainers-go/container.go @@ -50,7 +50,7 @@ type Container interface { Stop(context.Context, *time.Duration) error // stop the container // Terminate stops and removes the container and its image if it was built and not flagged as kept. 
- Terminate(ctx context.Context) error + Terminate(ctx context.Context, opts ...TerminateOption) error Logs(context.Context) (io.ReadCloser, error) // Get logs of the container FollowOutput(LogConsumer) // Deprecated: it will be removed in the next major release @@ -74,10 +74,10 @@ type Container interface { type ImageBuildInfo interface { BuildOptions() (types.ImageBuildOptions, error) // converts the ImageBuildInfo to a types.ImageBuildOptions GetContext() (io.Reader, error) // the path to the build context - GetDockerfile() string // the relative path to the Dockerfile, including the fileitself + GetDockerfile() string // the relative path to the Dockerfile, including the file itself GetRepo() string // get repo label for image GetTag() string // get tag label for image - ShouldPrintBuildLog() bool // allow build log to be printed to stdout + BuildLogWriter() io.Writer // for output of build log, use io.Discard to disable the output ShouldBuildImage() bool // return true if the image needs to be built GetBuildArgs() map[string]*string // return the environment args used to build the from Dockerfile GetAuthConfigs() map[string]registry.AuthConfig // Deprecated. Testcontainers will detect registry credentials automatically. Return the auth configs to be able to pull from an authenticated docker registry @@ -92,7 +92,8 @@ type FromDockerfile struct { Repo string // the repo label for image, defaults to UUID Tag string // the tag label for image, defaults to UUID BuildArgs map[string]*string // enable user to pass build args to docker daemon - PrintBuildLog bool // enable user to print build log + PrintBuildLog bool // Deprecated: Use BuildLogWriter instead + BuildLogWriter io.Writer // for output of build log, defaults to io.Discard AuthConfigs map[string]registry.AuthConfig // Deprecated. Testcontainers will detect registry credentials automatically. Enable auth configs to be able to pull from an authenticated docker registry // KeepImage describes whether DockerContainer.Terminate should not delete the // container image. Useful for images that are built from a Dockerfile and take a @@ -167,6 +168,15 @@ type ContainerRequest struct { LogConsumerCfg *LogConsumerConfig // define the configuration for the log producer and its log consumers to follow the logs } +// sessionID returns the session ID for the container request. +func (c *ContainerRequest) sessionID() string { + if sessionID := c.Labels[core.LabelSessionID]; sessionID != "" { + return sessionID + } + + return core.SessionID() +} + // containerOptions functional options for a container type containerOptions struct { ImageName string @@ -277,34 +287,34 @@ func (c *ContainerRequest) GetBuildArgs() map[string]*string { return c.FromDockerfile.BuildArgs } -// GetDockerfile returns the Dockerfile from the ContainerRequest, defaults to "Dockerfile" +// GetDockerfile returns the Dockerfile from the ContainerRequest, defaults to "Dockerfile". +// Sets FromDockerfile.Dockerfile to the default if blank. func (c *ContainerRequest) GetDockerfile() string { - f := c.FromDockerfile.Dockerfile - if f == "" { - return "Dockerfile" + if c.FromDockerfile.Dockerfile == "" { + c.FromDockerfile.Dockerfile = "Dockerfile" } - return f + return c.FromDockerfile.Dockerfile } -// GetRepo returns the Repo label for image from the ContainerRequest, defaults to UUID +// GetRepo returns the Repo label for image from the ContainerRequest, defaults to UUID. +// Sets FromDockerfile.Repo to the default value if blank. 
func (c *ContainerRequest) GetRepo() string { - r := c.FromDockerfile.Repo - if r == "" { - return uuid.NewString() + if c.FromDockerfile.Repo == "" { + c.FromDockerfile.Repo = uuid.NewString() } - return strings.ToLower(r) + return strings.ToLower(c.FromDockerfile.Repo) } -// GetTag returns the Tag label for image from the ContainerRequest, defaults to UUID +// GetTag returns the Tag label for image from the ContainerRequest, defaults to UUID. +// Sets FromDockerfile.Tag to the default value if blank. func (c *ContainerRequest) GetTag() string { - t := c.FromDockerfile.Tag - if t == "" { - return uuid.NewString() + if c.FromDockerfile.Tag == "" { + c.FromDockerfile.Tag = uuid.NewString() } - return strings.ToLower(t) + return strings.ToLower(c.FromDockerfile.Tag) } // Deprecated: Testcontainers will detect registry credentials automatically, and it will be removed in the next major release. @@ -401,8 +411,20 @@ func (c *ContainerRequest) ShouldKeepBuiltImage() bool { return c.FromDockerfile.KeepImage } -func (c *ContainerRequest) ShouldPrintBuildLog() bool { - return c.FromDockerfile.PrintBuildLog +// BuildLogWriter returns the io.Writer for output of log when building a Docker image from +// a Dockerfile. It returns the BuildLogWriter from the ContainerRequest, defaults to io.Discard. +// For backward compatibility, if BuildLogWriter is default and PrintBuildLog is true, +// the function returns os.Stderr. +func (c *ContainerRequest) BuildLogWriter() io.Writer { + if c.FromDockerfile.BuildLogWriter != nil { + return c.FromDockerfile.BuildLogWriter + } + if c.FromDockerfile.PrintBuildLog { + c.FromDockerfile.BuildLogWriter = os.Stderr + } else { + c.FromDockerfile.BuildLogWriter = io.Discard + } + return c.FromDockerfile.BuildLogWriter } // BuildOptions returns the image build options when building a Docker image from a Dockerfile. @@ -523,7 +545,7 @@ func (c *ContainerRequest) validateMounts() error { if len(hostConfig.Binds) > 0 { for _, bind := range hostConfig.Binds { parts := strings.Split(bind, ":") - if len(parts) != 2 { + if len(parts) != 2 && len(parts) != 3 { return fmt.Errorf("%w: %s", ErrInvalidBindMount, bind) } targetPath := parts[1] diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker.go b/vendor/github.com/testcontainers/testcontainers-go/docker.go index 2ef8c697..2ce849be 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/docker.go +++ b/vendor/github.com/testcontainers/testcontainers-go/docker.go @@ -15,7 +15,7 @@ import ( "os" "path/filepath" "regexp" - "strings" + "sync" "time" "github.com/cenkalti/backoff/v4" @@ -152,7 +152,7 @@ func (c *DockerContainer) PortEndpoint(ctx context.Context, port nat.Port, proto protoFull := "" if proto != "" { - protoFull = fmt.Sprintf("%s://", proto) + protoFull = proto + "://" } return fmt.Sprintf("%s%s:%s", protoFull, host, outerPort.Port()), nil @@ -303,12 +303,11 @@ func (c *DockerContainer) Stop(ctx context.Context, timeout *time.Duration) erro // The following hooks are called in order: // - [ContainerLifecycleHooks.PreTerminates] // - [ContainerLifecycleHooks.PostTerminates] -func (c *DockerContainer) Terminate(ctx context.Context) error { - // ContainerRemove hardcodes stop timeout to 3 seconds which is too short - // to ensure that child containers are stopped so we manually call stop. - // TODO: make this configurable via a functional option. - timeout := 10 * time.Second - err := c.Stop(ctx, &timeout) +// +// Default: timeout is 10 seconds. 
+func (c *DockerContainer) Terminate(ctx context.Context, opts ...TerminateOption) error { + options := NewTerminateOptions(ctx, opts...) + err := c.Stop(options.Context(), options.StopTimeout()) if err != nil && !isCleanupSafe(err) { return fmt.Errorf("stop: %w", err) } @@ -343,6 +342,10 @@ func (c *DockerContainer) Terminate(ctx context.Context) error { c.sessionID = "" c.isRunning = false + if err = options.Cleanup(); err != nil { + errs = append(errs, err) + } + return errors.Join(errs...) } @@ -762,11 +765,15 @@ func (c *DockerContainer) startLogProduction(ctx context.Context, opts ...LogPro // Setup the log production context which will be used to stop the log production. c.logProductionCtx, c.logProductionCancel = context.WithCancelCause(ctx) - go func() { - err := c.logProducer(stdout, stderr) - // Set context cancel cause, if not already set. - c.logProductionCancel(err) - }() + // We capture context cancel function to avoid data race with multiple + // calls to startLogProduction. + go func(cancel context.CancelCauseFunc) { + // Ensure the context is cancelled when log productions completes + // so that GetLogProductionErrorChannel functions correctly. + defer cancel(nil) + + c.logProducer(stdout, stderr) + }(c.logProductionCancel) return nil } @@ -775,40 +782,49 @@ func (c *DockerContainer) startLogProduction(ctx context.Context, opts ...LogPro // - logProductionCtx is done // - A fatal error occurs // - No more logs are available -func (c *DockerContainer) logProducer(stdout, stderr io.Writer) error { +func (c *DockerContainer) logProducer(stdout, stderr io.Writer) { // Clean up idle client connections. defer c.provider.Close() // Setup the log options, start from the beginning. - options := container.LogsOptions{ + options := &container.LogsOptions{ ShowStdout: true, ShowStderr: true, Follow: true, } - for { - timeoutCtx, cancel := context.WithTimeout(c.logProductionCtx, *c.logProductionTimeout) - defer cancel() + // Use a separate method so that timeout cancel function is + // called correctly. + for c.copyLogsTimeout(stdout, stderr, options) { + } +} - err := c.copyLogs(timeoutCtx, stdout, stderr, options) - switch { - case err == nil: - // No more logs available. - return nil - case c.logProductionCtx.Err() != nil: - // Log production was stopped or caller context is done. - return nil - case timeoutCtx.Err() != nil, errors.Is(err, net.ErrClosed): - // Timeout or client connection closed, retry. - default: - // Unexpected error, retry. - Logger.Printf("Unexpected error reading logs: %v", err) - } +// copyLogsTimeout copies logs from the container to stdout and stderr with a timeout. +// It returns true if the log production should be retried, false otherwise. +func (c *DockerContainer) copyLogsTimeout(stdout, stderr io.Writer, options *container.LogsOptions) bool { + timeoutCtx, cancel := context.WithTimeout(c.logProductionCtx, *c.logProductionTimeout) + defer cancel() - // Retry from the last log received. - now := time.Now() - options.Since = fmt.Sprintf("%d.%09d", now.Unix(), int64(now.Nanosecond())) + err := c.copyLogs(timeoutCtx, stdout, stderr, *options) + switch { + case err == nil: + // No more logs available. + return false + case c.logProductionCtx.Err() != nil: + // Log production was stopped or caller context is done. + return false + case timeoutCtx.Err() != nil, errors.Is(err, net.ErrClosed): + // Timeout or client connection closed, retry. + default: + // Unexpected error, retry. 
+ Logger.Printf("Unexpected error reading logs: %v", err) } + + // Retry from the last log received. + now := time.Now() + options.Since = fmt.Sprintf("%d.%09d", now.Unix(), int64(now.Nanosecond())) + + return true } // copyLogs copies logs from the container to stdout and stderr. @@ -866,13 +882,41 @@ func (c *DockerContainer) GetLogProductionErrorChannel() <-chan error { } errCh := make(chan error, 1) - go func() { - <-c.logProductionCtx.Done() - errCh <- context.Cause(c.logProductionCtx) - }() + go func(ctx context.Context) { + <-ctx.Done() + errCh <- context.Cause(ctx) + close(errCh) + }(c.logProductionCtx) + return errCh } +// connectReaper connects the reaper to the container if it is needed. +func (c *DockerContainer) connectReaper(ctx context.Context) error { + if c.provider.config.RyukDisabled || isReaperImage(c.Image) { + // Reaper is disabled or we are the reaper container. + return nil + } + + reaper, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, c.provider.host), core.SessionID(), c.provider) + if err != nil { + return fmt.Errorf("reaper: %w", err) + } + + if c.terminationSignal, err = reaper.Connect(); err != nil { + return fmt.Errorf("reaper connect: %w", err) + } + + return nil +} + +// cleanupTermSignal triggers the termination signal if it was created and an error occurred. +func (c *DockerContainer) cleanupTermSignal(err error) { + if c.terminationSignal != nil && err != nil { + c.terminationSignal <- true + } +} + // DockerNetwork represents a network started using Docker type DockerNetwork struct { ID string // Network ID from Docker @@ -906,6 +950,7 @@ type DockerProvider struct { host string hostCache string config config.Config + mtx sync.Mutex } // Client gets the docker client used by the provider @@ -962,10 +1007,7 @@ func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (st } defer resp.Body.Close() - output := io.Discard - if img.ShouldPrintBuildLog() { - output = os.Stderr - } + output := img.BuildLogWriter() // Always process the output, even if it is not printed // to ensure that errors during the build process are @@ -980,33 +1022,30 @@ func (p *DockerProvider) BuildImage(ctx context.Context, img ImageBuildInfo) (st } // CreateContainer fulfils a request for a container without starting it -func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { //nolint:nonamedreturns // Needed for error checking. 
+func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { // defer the close of the Docker client connection the soonest defer p.Close() - // Make sure that bridge network exists - // In case it is disabled we will create reaper_default network - if p.DefaultNetwork == "" { - p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client) - if err != nil { - return nil, err - } + var defaultNetwork string + defaultNetwork, err = p.ensureDefaultNetwork(ctx) + if err != nil { + return nil, fmt.Errorf("ensure default network: %w", err) } // If default network is not bridge make sure it is attached to the request // as container won't be attached to it automatically // in case of Podman the bridge network is called 'podman' as 'bridge' would conflict - if p.DefaultNetwork != p.defaultBridgeNetworkName { + if defaultNetwork != p.defaultBridgeNetworkName { isAttached := false for _, net := range req.Networks { - if net == p.DefaultNetwork { + if net == defaultNetwork { isAttached = true break } } if !isAttached { - req.Networks = append(req.Networks, p.DefaultNetwork) + req.Networks = append(req.Networks, defaultNetwork) } } @@ -1021,28 +1060,6 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque req.Labels = make(map[string]string) } - var termSignal chan bool - // the reaper does not need to start a reaper for itself - isReaperContainer := strings.HasSuffix(imageName, config.ReaperDefaultImage) - if !p.config.RyukDisabled && !isReaperContainer { - r, err := spawner.reaper(context.WithValue(ctx, core.DockerHostContextKey, p.host), core.SessionID(), p) - if err != nil { - return nil, fmt.Errorf("reaper: %w", err) - } - - termSignal, err := r.Connect() - if err != nil { - return nil, fmt.Errorf("reaper connect: %w", err) - } - - // Cleanup on error. - defer func() { - if err != nil { - termSignal <- true - } - }() - } - if err = req.Validate(); err != nil { return nil, err } @@ -1052,11 +1069,29 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque var platform *specs.Platform + defaultHooks := []ContainerLifecycleHooks{ + DefaultLoggingHook(p.Logger), + } + + origLifecycleHooks := req.LifecycleHooks + req.LifecycleHooks = []ContainerLifecycleHooks{ + combineContainerHooks(defaultHooks, req.LifecycleHooks), + } + if req.ShouldBuildImage() { + if err = req.buildingHook(ctx); err != nil { + return nil, err + } + imageName, err = p.BuildImage(ctx, &req) if err != nil { return nil, err } + + req.Image = imageName + if err = req.builtHook(ctx); err != nil { + return nil, err + } } else { for _, is := range req.ImageSubstitutors { modifiedTag, err := is.Substitute(imageName) @@ -1106,7 +1141,7 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque } } - if !isReaperContainer { + if !isReaperImage(imageName) { // Add the labels that identify this as a testcontainers container and // allow the reaper to terminate it if requested. 
AddGenericLabels(req.Labels) @@ -1132,13 +1167,12 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque networkingConfig := &network.NetworkingConfig{} // default hooks include logger hook and pre-create hook - defaultHooks := []ContainerLifecycleHooks{ - DefaultLoggingHook(p.Logger), + defaultHooks = append(defaultHooks, defaultPreCreateHook(p, dockerInput, hostConfig, networkingConfig), defaultCopyFileToContainerHook(req.Files), defaultLogConsumersHook(req.LogConsumerCfg), defaultReadinessHook(), - } + ) // in the case the container needs to access a local port // we need to forward the local port to the container @@ -1151,10 +1185,25 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque return nil, fmt.Errorf("expose host ports: %w", err) } + defer func() { + if err != nil && con == nil { + // Container setup failed so ensure we clean up the sshd container too. + ctr := &DockerContainer{ + provider: p, + logger: p.Logger, + lifecycleHooks: []ContainerLifecycleHooks{sshdForwardPortsHook}, + } + err = errors.Join(ctr.terminatingHook(ctx)) + } + }() + defaultHooks = append(defaultHooks, sshdForwardPortsHook) } - req.LifecycleHooks = []ContainerLifecycleHooks{combineContainerHooks(defaultHooks, req.LifecycleHooks)} + // Combine with the original LifecycleHooks to avoid duplicate logging hooks. + req.LifecycleHooks = []ContainerLifecycleHooks{ + combineContainerHooks(defaultHooks, origLifecycleHooks), + } err = req.creatingHook(ctx) if err != nil { @@ -1184,26 +1233,35 @@ func (p *DockerProvider) CreateContainer(ctx context.Context, req ContainerReque } } - c := &DockerContainer{ - ID: resp.ID, - WaitingFor: req.WaitingFor, - Image: imageName, - imageWasBuilt: req.ShouldBuildImage(), - keepBuiltImage: req.ShouldKeepBuiltImage(), - sessionID: core.SessionID(), - exposedPorts: req.ExposedPorts, - provider: p, - terminationSignal: termSignal, - logger: p.Logger, - lifecycleHooks: req.LifecycleHooks, + // This should match the fields set in ContainerFromDockerResponse. + ctr := &DockerContainer{ + ID: resp.ID, + WaitingFor: req.WaitingFor, + Image: imageName, + imageWasBuilt: req.ShouldBuildImage(), + keepBuiltImage: req.ShouldKeepBuiltImage(), + sessionID: req.sessionID(), + exposedPorts: req.ExposedPorts, + provider: p, + logger: p.Logger, + lifecycleHooks: req.LifecycleHooks, } - err = c.createdHook(ctx) - if err != nil { - return nil, err + if err = ctr.connectReaper(ctx); err != nil { + return ctr, err // No wrap as it would stutter. } - return c, nil + // Wrapped so the returned error is passed to the cleanup function. + defer func(ctr *DockerContainer) { + ctr.cleanupTermSignal(err) + }(ctr) + + if err = ctr.createdHook(ctx); err != nil { + // Return the container to allow caller to clean up. 
+ return ctr, fmt.Errorf("created hook: %w", err) + } + + return ctr, nil } func (p *DockerProvider) findContainerByName(ctx context.Context, name string) (*types.Container, error) { @@ -1215,7 +1273,7 @@ func (p *DockerProvider) findContainerByName(ctx context.Context, name string) ( filter := filters.NewArgs(filters.Arg("name", fmt.Sprintf("^%s$", name))) containers, err := p.client.ContainerList(ctx, container.ListOptions{Filters: filter}) if err != nil { - return nil, err + return nil, fmt.Errorf("container list: %w", err) } defer p.Close() @@ -1251,7 +1309,7 @@ func (p *DockerProvider) waitContainerCreation(ctx context.Context, name string) ) } -func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { //nolint:nonamedreturns // Needed for error check. +func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req ContainerRequest) (con Container, err error) { c, err := p.findContainerByName(ctx, req.Name) if err != nil { return nil, err @@ -1270,7 +1328,7 @@ func (p *DockerProvider) ReuseOrCreateContainer(ctx context.Context, req Contain } } - sessionID := core.SessionID() + sessionID := req.sessionID() var termSignal chan bool if !p.config.RyukDisabled { @@ -1411,10 +1469,13 @@ func (p *DockerProvider) Config() TestcontainersConfig { // Warning: this is based on your Docker host setting. Will fail if using an SSH tunnel // You can use the "TESTCONTAINERS_HOST_OVERRIDE" env variable to set this yourself func (p *DockerProvider) DaemonHost(ctx context.Context) (string, error) { - return daemonHost(ctx, p) + p.mtx.Lock() + defer p.mtx.Unlock() + + return p.daemonHostLocked(ctx) } -func daemonHost(ctx context.Context, p *DockerProvider) (string, error) { +func (p *DockerProvider) daemonHostLocked(ctx context.Context) (string, error) { if p.hostCache != "" { return p.hostCache, nil } @@ -1437,7 +1498,11 @@ func daemonHost(ctx context.Context, p *DockerProvider) (string, error) { p.hostCache = daemonURL.Hostname() case "unix", "npipe": if core.InAContainer() { - ip, err := p.GetGatewayIP(ctx) + defaultNetwork, err := p.ensureDefaultNetworkLocked(ctx) + if err != nil { + return "", fmt.Errorf("ensure default network: %w", err) + } + ip, err := p.getGatewayIP(ctx, defaultNetwork) if err != nil { ip, err = core.DefaultGatewayIP() if err != nil { @@ -1457,16 +1522,12 @@ func daemonHost(ctx context.Context, p *DockerProvider) (string, error) { // Deprecated: use network.New instead // CreateNetwork returns the object representing a new network identified by its name -func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (net Network, err error) { //nolint:nonamedreturns // Needed for error check. 
+func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) (net Network, err error) { // defer the close of the Docker client connection the soonest defer p.Close() - // Make sure that bridge network exists - // In case it is disabled we will create reaper_default network - if p.DefaultNetwork == "" { - if p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client); err != nil { - return nil, err - } + if _, err = p.ensureDefaultNetwork(ctx); err != nil { + return nil, fmt.Errorf("ensure default network: %w", err) } if req.Labels == nil { @@ -1482,7 +1543,7 @@ func (p *DockerProvider) CreateNetwork(ctx context.Context, req NetworkRequest) IPAM: req.IPAM, } - sessionID := core.SessionID() + sessionID := req.sessionID() var termSignal chan bool if !p.config.RyukDisabled { @@ -1537,14 +1598,15 @@ func (p *DockerProvider) GetNetwork(ctx context.Context, req NetworkRequest) (ne func (p *DockerProvider) GetGatewayIP(ctx context.Context) (string, error) { // Use a default network as defined in the DockerProvider - if p.DefaultNetwork == "" { - var err error - p.DefaultNetwork, err = p.getDefaultNetwork(ctx, p.client) - if err != nil { - return "", err - } + defaultNetwork, err := p.ensureDefaultNetwork(ctx) + if err != nil { + return "", fmt.Errorf("ensure default network: %w", err) } - nw, err := p.GetNetwork(ctx, NetworkRequest{Name: p.DefaultNetwork}) + return p.getGatewayIP(ctx, defaultNetwork) +} + +func (p *DockerProvider) getGatewayIP(ctx context.Context, defaultNetwork string) (string, error) { + nw, err := p.GetNetwork(ctx, NetworkRequest{Name: defaultNetwork}) if err != nil { return "", err } @@ -1563,76 +1625,97 @@ func (p *DockerProvider) GetGatewayIP(ctx context.Context) (string, error) { return ip, nil } -func (p *DockerProvider) getDefaultNetwork(ctx context.Context, cli client.APIClient) (string, error) { - // Get list of available networks - networkResources, err := cli.NetworkList(ctx, network.ListOptions{}) - if err != nil { - return "", err - } +// ensureDefaultNetwork ensures that defaultNetwork is set and creates +// it if it does not exist, returning its value. +// It is safe to call this method concurrently. +func (p *DockerProvider) ensureDefaultNetwork(ctx context.Context) (string, error) { + p.mtx.Lock() + defer p.mtx.Unlock() + return p.ensureDefaultNetworkLocked(ctx) +} - reaperNetwork := ReaperDefault +func (p *DockerProvider) ensureDefaultNetworkLocked(ctx context.Context) (string, error) { + if p.defaultNetwork != "" { + // Already set. + return p.defaultNetwork, nil + } - reaperNetworkExists := false + networkResources, err := p.client.NetworkList(ctx, network.ListOptions{}) + if err != nil { + return "", fmt.Errorf("network list: %w", err) + } + // TODO: remove once we have docker context support via #2810 + // Prefer the default bridge network if it exists. + // This makes the results stable as network list order is not guaranteed. 
for _, net := range networkResources { - if net.Name == p.defaultBridgeNetworkName { - return p.defaultBridgeNetworkName, nil + switch net.Name { + case p.defaultBridgeNetworkName: + p.defaultNetwork = p.defaultBridgeNetworkName + return p.defaultNetwork, nil + case ReaperDefault: + p.defaultNetwork = ReaperDefault } + } - if net.Name == reaperNetwork { - reaperNetworkExists = true - } + if p.defaultNetwork != "" { + return p.defaultNetwork, nil } - // Create a bridge network for the container communications - if !reaperNetworkExists { - _, err = cli.NetworkCreate(ctx, reaperNetwork, network.CreateOptions{ - Driver: Bridge, - Attachable: true, - Labels: GenericLabels(), - }) - // If the network already exists, we can ignore the error as that can - // happen if we are running multiple tests in parallel and we only - // need to ensure that the network exists. - if err != nil && !errdefs.IsConflict(err) { - return "", err - } + // Create a bridge network for the container communications. + _, err = p.client.NetworkCreate(ctx, ReaperDefault, network.CreateOptions{ + Driver: Bridge, + Attachable: true, + Labels: GenericLabels(), + }) + // If the network already exists, we can ignore the error as that can + // happen if we are running multiple tests in parallel and we only + // need to ensure that the network exists. + if err != nil && !errdefs.IsConflict(err) { + return "", fmt.Errorf("network create: %w", err) } - return reaperNetwork, nil + p.defaultNetwork = ReaperDefault + + return p.defaultNetwork, nil } -// containerFromDockerResponse builds a Docker container struct from the response of the Docker API -func containerFromDockerResponse(ctx context.Context, response types.Container) (*DockerContainer, error) { - provider, err := NewDockerProvider() - if err != nil { - return nil, err +// ContainerFromType builds a Docker container struct from the response of the Docker API +func (p *DockerProvider) ContainerFromType(ctx context.Context, response types.Container) (ctr *DockerContainer, err error) { + exposedPorts := make([]string, len(response.Ports)) + for i, port := range response.Ports { + exposedPorts[i] = fmt.Sprintf("%d/%s", port.PublicPort, port.Type) } - ctr := DockerContainer{} - - ctr.ID = response.ID - ctr.WaitingFor = nil - ctr.Image = response.Image - ctr.imageWasBuilt = false - - ctr.logger = provider.Logger - ctr.lifecycleHooks = []ContainerLifecycleHooks{ - DefaultLoggingHook(ctr.logger), + // This should match the fields set in CreateContainer. + ctr = &DockerContainer{ + ID: response.ID, + Image: response.Image, + imageWasBuilt: false, + sessionID: response.Labels[core.LabelSessionID], + isRunning: response.State == "running", + exposedPorts: exposedPorts, + provider: p, + logger: p.Logger, + lifecycleHooks: []ContainerLifecycleHooks{ + DefaultLoggingHook(p.Logger), + }, } - ctr.provider = provider - ctr.sessionID = core.SessionID() - ctr.consumers = []LogConsumer{} - ctr.isRunning = response.State == "running" + if err = ctr.connectReaper(ctx); err != nil { + return nil, err + } - // the termination signal should be obtained from the reaper - ctr.terminationSignal = nil + // Wrapped so the returned error is passed to the cleanup function. + defer func(ctr *DockerContainer) { + ctr.cleanupTermSignal(err) + }(ctr) // populate the raw representation of the container jsonRaw, err := ctr.inspectRawContainer(ctx) if err != nil { - return nil, fmt.Errorf("inspect raw container: %w", err) + // Return the container to allow caller to clean up. 
+ return ctr, fmt.Errorf("inspect raw container: %w", err) } // the health status of the container, if any @@ -1640,7 +1723,7 @@ func containerFromDockerResponse(ctx context.Context, response types.Container) ctr.healthStatus = health.Status } - return &ctr, nil + return ctr, nil } // ListImages list images from the provider. If an image has multiple Tags, each tag is reported diff --git a/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go b/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go index af0d415d..58b3ef26 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go +++ b/vendor/github.com/testcontainers/testcontainers-go/docker_auth.go @@ -21,6 +21,9 @@ import ( // defaultRegistryFn is variable overwritten in tests to check for behaviour with different default values. var defaultRegistryFn = defaultRegistry +// getRegistryCredentials is a variable overwritten in tests to mock the dockercfg.GetRegistryCredentials function. +var getRegistryCredentials = dockercfg.GetRegistryCredentials + // DockerImageAuth returns the auth config for the given Docker image, extracting first its Docker registry. // Finally, it will use the credential helpers to extract the information from the docker config file // for that registry, if it exists. @@ -111,9 +114,28 @@ type credentials struct { var creds = &credentialsCache{entries: map[string]credentials{}} -// Get returns the username and password for the given hostname +// AuthConfig updates the details in authConfig for the given hostname +// as determined by the details in configKey. +func (c *credentialsCache) AuthConfig(hostname, configKey string, authConfig *registry.AuthConfig) error { + u, p, err := creds.get(hostname, configKey) + if err != nil { + return err + } + + if u != "" { + authConfig.Username = u + authConfig.Password = p + } else { + authConfig.IdentityToken = p + } + + return nil +} + +// get returns the username and password for the given hostname // as determined by the details in configPath. -func (c *credentialsCache) Get(hostname, configKey string) (string, string, error) { +// If the username is empty, the password is an identity token. +func (c *credentialsCache) get(hostname, configKey string) (string, string, error) { key := configKey + ":" + hostname c.mtx.RLock() entry, ok := c.entries[key] @@ -124,7 +146,7 @@ func (c *credentialsCache) Get(hostname, configKey string) (string, string, erro } // No entry found, request and cache. - user, password, err := dockercfg.GetRegistryCredentials(hostname) + user, password, err := getRegistryCredentials(hostname) if err != nil { return "", "", fmt.Errorf("getting credentials for %s: %w", hostname, err) } @@ -186,14 +208,10 @@ func getDockerAuthConfigs() (map[string]registry.AuthConfig, error) { switch { case ac.Username == "" && ac.Password == "": // Look up credentials from the credential store. - u, p, err := creds.Get(k, key) - if err != nil { + if err := creds.AuthConfig(k, key, &ac); err != nil { results <- authConfigResult{err: err} return } - - ac.Username = u - ac.Password = p case ac.Auth == "": // Create auth from the username and password encoding. 
ac.Auth = base64.StdEncoding.EncodeToString([]byte(ac.Username + ":" + ac.Password)) @@ -203,25 +221,19 @@ func getDockerAuthConfigs() (map[string]registry.AuthConfig, error) { }(k, v) } - // in the case where the auth field in the .docker/conf.json is empty, and the user has credential helpers registered - // the auth comes from there + // In the case where the auth field in the .docker/conf.json is empty, and the user has + // credential helpers registered the auth comes from there. for k := range cfg.CredentialHelpers { go func(k string) { defer wg.Done() - u, p, err := creds.Get(k, key) - if err != nil { + var ac registry.AuthConfig + if err := creds.AuthConfig(k, key, &ac); err != nil { results <- authConfigResult{err: err} return } - results <- authConfigResult{ - key: k, - cfg: registry.AuthConfig{ - Username: u, - Password: p, - }, - } + results <- authConfigResult{key: k, cfg: ac} }(k) } diff --git a/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go b/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go index 2b795836..9c852fb5 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go +++ b/vendor/github.com/testcontainers/testcontainers-go/exec/processor.go @@ -2,7 +2,9 @@ package exec import ( "bytes" + "fmt" "io" + "sync" "github.com/docker/docker/api/types/container" "github.com/docker/docker/pkg/stdcopy" @@ -60,6 +62,43 @@ func WithEnv(env []string) ProcessOption { }) } +// safeBuffer is a goroutine safe buffer. +type safeBuffer struct { + mtx sync.Mutex + buf bytes.Buffer + err error +} + +// Error sets an error for the next read. +func (sb *safeBuffer) Error(err error) { + sb.mtx.Lock() + defer sb.mtx.Unlock() + + sb.err = err +} + +// Write writes p to the buffer. +// It is safe for concurrent use by multiple goroutines. +func (sb *safeBuffer) Write(p []byte) (n int, err error) { + sb.mtx.Lock() + defer sb.mtx.Unlock() + + return sb.buf.Write(p) +} + +// Read reads up to len(p) bytes into p from the buffer. +// It is safe for concurrent use by multiple goroutines. +func (sb *safeBuffer) Read(p []byte) (n int, err error) { + sb.mtx.Lock() + defer sb.mtx.Unlock() + + if sb.err != nil { + return 0, sb.err + } + + return sb.buf.Read(p) +} + // Multiplexed returns a [ProcessOption] that configures the command execution // to combine stdout and stderr into a single stream without Docker's multiplexing headers. 
func Multiplexed() ProcessOption { @@ -73,13 +112,14 @@ func Multiplexed() ProcessOption { done := make(chan struct{}) - var outBuff bytes.Buffer - var errBuff bytes.Buffer + var outBuff safeBuffer + var errBuff safeBuffer go func() { + defer close(done) if _, err := stdcopy.StdCopy(&outBuff, &errBuff, opts.Reader); err != nil { + outBuff.Error(fmt.Errorf("copying output: %w", err)) return } - close(done) }() <-done diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go b/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go index b0bcc24d..85be6acd 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/config/config.go @@ -11,7 +11,7 @@ import ( "github.com/magiconair/properties" ) -const ReaperDefaultImage = "testcontainers/ryuk:0.10.2" +const ReaperDefaultImage = "testcontainers/ryuk:0.11.0" var ( tcConfig Config @@ -68,17 +68,17 @@ type Config struct { // RyukReconnectionTimeout is the time to wait before attempting to reconnect to the Garbage Collector container. // - // Environment variable: TESTCONTAINERS_RYUK_RECONNECTION_TIMEOUT + // Environment variable: RYUK_RECONNECTION_TIMEOUT RyukReconnectionTimeout time.Duration `properties:"ryuk.reconnection.timeout,default=10s"` // RyukConnectionTimeout is the time to wait before timing out when connecting to the Garbage Collector container. // - // Environment variable: TESTCONTAINERS_RYUK_CONNECTION_TIMEOUT + // Environment variable: RYUK_CONNECTION_TIMEOUT RyukConnectionTimeout time.Duration `properties:"ryuk.connection.timeout,default=1m"` // RyukVerbose is a flag to enable or disable verbose logging for the Garbage Collector. // - // Environment variable: TESTCONTAINERS_RYUK_VERBOSE + // Environment variable: RYUK_VERBOSE RyukVerbose bool `properties:"ryuk.verbose,default=false"` // TestcontainersHost is the address of the Testcontainers host. @@ -126,17 +126,17 @@ func read() Config { config.RyukPrivileged = ryukPrivilegedEnv == "true" } - ryukVerboseEnv := os.Getenv("TESTCONTAINERS_RYUK_VERBOSE") + ryukVerboseEnv := readTestcontainersEnv("RYUK_VERBOSE") if parseBool(ryukVerboseEnv) { config.RyukVerbose = ryukVerboseEnv == "true" } - ryukReconnectionTimeoutEnv := os.Getenv("TESTCONTAINERS_RYUK_RECONNECTION_TIMEOUT") + ryukReconnectionTimeoutEnv := readTestcontainersEnv("RYUK_RECONNECTION_TIMEOUT") if timeout, err := time.ParseDuration(ryukReconnectionTimeoutEnv); err == nil { config.RyukReconnectionTimeout = timeout } - ryukConnectionTimeoutEnv := os.Getenv("TESTCONTAINERS_RYUK_CONNECTION_TIMEOUT") + ryukConnectionTimeoutEnv := readTestcontainersEnv("RYUK_CONNECTION_TIMEOUT") if timeout, err := time.ParseDuration(ryukConnectionTimeoutEnv); err == nil { config.RyukConnectionTimeout = timeout } @@ -168,3 +168,18 @@ func parseBool(input string) bool { _, err := strconv.ParseBool(input) return err == nil } + +// readTestcontainersEnv reads the environment variable with the given name. +// It checks for the environment variable with the given name first, and then +// checks for the environment variable with the given name prefixed with "TESTCONTAINERS_". 
+func readTestcontainersEnv(envVar string) string { + value := os.Getenv(envVar) + if value != "" { + return value + } + + // TODO: remove this prefix after the next major release + const prefix string = "TESTCONTAINERS_" + + return os.Getenv(prefix + envVar) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go index cf06dde7..1c452977 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/bootstrap.go @@ -2,6 +2,7 @@ package core import ( "crypto/sha256" + "encoding/hex" "fmt" "os" @@ -89,7 +90,7 @@ func init() { return } - sessionID = fmt.Sprintf("%x", hasher.Sum(nil)) + sessionID = hex.EncodeToString(hasher.Sum(nil)) } func ProcessID() string { diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go index 3088a374..765626da 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_host.go @@ -309,10 +309,15 @@ func testcontainersHostFromProperties(ctx context.Context) (string, error) { return "", ErrTestcontainersHostNotSetInProperties } +// DockerEnvFile is the file that is created when running inside a container. +// It's a variable to allow testing. +// TODO: Remove this once context rework is done, which eliminates need for the default network creation. +var DockerEnvFile = "/.dockerenv" + // InAContainer returns true if the code is running inside a container // See https://github.com/docker/docker/blob/a9fa38b1edf30b23cae3eade0be48b3d4b1de14b/daemon/initlayer/setup_unix.go#L25 func InAContainer() bool { - return inAContainer("/.dockerenv") + return inAContainer(DockerEnvFile) } func inAContainer(path string) bool { diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go index b8e0f6e1..70cdebf2 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/core/docker_rootless.go @@ -3,11 +3,11 @@ package core import ( "context" "errors" - "fmt" "net/url" "os" "path/filepath" "runtime" + "strconv" ) var ( @@ -144,7 +144,7 @@ func rootlessSocketPathFromHomeDesktopDir() (string, error) { // rootlessSocketPathFromRunDir returns the path to the rootless Docker socket from the /run/user//docker.sock file. 
func rootlessSocketPathFromRunDir() (string, error) { uid := os.Getuid() - f := filepath.Join(baseRunDir, "user", fmt.Sprintf("%d", uid), "docker.sock") + f := filepath.Join(baseRunDir, "user", strconv.Itoa(uid), "docker.sock") if fileExists(f) { return f, nil } diff --git a/vendor/github.com/testcontainers/testcontainers-go/internal/version.go b/vendor/github.com/testcontainers/testcontainers-go/internal/version.go index 0c688d5e..6e8cb510 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/internal/version.go +++ b/vendor/github.com/testcontainers/testcontainers-go/internal/version.go @@ -1,4 +1,4 @@ package internal // Version is the next development version of the application -const Version = "0.34.0" +const Version = "0.35.0" diff --git a/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go b/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go index 57833daf..63446f71 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go +++ b/vendor/github.com/testcontainers/testcontainers-go/lifecycle.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" "io" + "reflect" "strings" "time" @@ -39,6 +40,8 @@ type ContainerHook func(ctx context.Context, ctr Container) error // to modify the container lifecycle. All the container lifecycle hooks except the PreCreates hooks // will be passed to the container once it's created type ContainerLifecycleHooks struct { + PreBuilds []ContainerRequestHook + PostBuilds []ContainerRequestHook PreCreates []ContainerRequestHook PostCreates []ContainerHook PreStarts []ContainerHook @@ -57,6 +60,18 @@ var DefaultLoggingHook = func(logger Logging) ContainerLifecycleHooks { } return ContainerLifecycleHooks{ + PreBuilds: []ContainerRequestHook{ + func(ctx context.Context, req ContainerRequest) error { + logger.Printf("🐳 Building image %s:%s", req.GetRepo(), req.GetTag()) + return nil + }, + }, + PostBuilds: []ContainerRequestHook{ + func(ctx context.Context, req ContainerRequest) error { + logger.Printf("✅ Built image %s", req.Image) + return nil + }, + }, PreCreates: []ContainerRequestHook{ func(ctx context.Context, req ContainerRequest) error { logger.Printf("🐳 Creating container for image %s", req.Image) @@ -284,11 +299,34 @@ var defaultReadinessHook = func() ContainerLifecycleHooks { } } +// buildingHook is a hook that will be called before a container image is built. +func (req ContainerRequest) buildingHook(ctx context.Context) error { + return req.applyLifecycleHooks(func(lifecycleHooks ContainerLifecycleHooks) error { + return lifecycleHooks.Building(ctx)(req) + }) +} + +// builtHook is a hook that will be called after a container image is built. +func (req ContainerRequest) builtHook(ctx context.Context) error { + return req.applyLifecycleHooks(func(lifecycleHooks ContainerLifecycleHooks) error { + return lifecycleHooks.Built(ctx)(req) + }) +} + // creatingHook is a hook that will be called before a container is created. func (req ContainerRequest) creatingHook(ctx context.Context) error { - errs := make([]error, len(req.LifecycleHooks)) - for i, lifecycleHooks := range req.LifecycleHooks { - errs[i] = lifecycleHooks.Creating(ctx)(req) + return req.applyLifecycleHooks(func(lifecycleHooks ContainerLifecycleHooks) error { + return lifecycleHooks.Creating(ctx)(req) + }) +} + +// applyLifecycleHooks calls hook on all LifecycleHooks. 
+func (req ContainerRequest) applyLifecycleHooks(hook func(lifecycleHooks ContainerLifecycleHooks) error) error { + var errs []error + for _, lifecycleHooks := range req.LifecycleHooks { + if err := hook(lifecycleHooks); err != nil { + errs = append(errs, err) + } } return errors.Join(errs...) @@ -370,9 +408,11 @@ func (c *DockerContainer) terminatedHook(ctx context.Context) error { // applyLifecycleHooks applies all lifecycle hooks reporting the container logs on error if logError is true. func (c *DockerContainer) applyLifecycleHooks(ctx context.Context, logError bool, hooks func(lifecycleHooks ContainerLifecycleHooks) []ContainerHook) error { - errs := make([]error, len(c.lifecycleHooks)) - for i, lifecycleHooks := range c.lifecycleHooks { - errs[i] = containerHookFn(ctx, hooks(lifecycleHooks))(c) + var errs []error + for _, lifecycleHooks := range c.lifecycleHooks { + if err := containerHookFn(ctx, hooks(lifecycleHooks))(c); err != nil { + errs = append(errs, err) + } } if err := errors.Join(errs...); err != nil { @@ -394,10 +434,26 @@ func (c *DockerContainer) applyLifecycleHooks(ctx context.Context, logError bool return nil } +// Building is a hook that will be called before a container image is built. +func (c ContainerLifecycleHooks) Building(ctx context.Context) func(req ContainerRequest) error { + return containerRequestHook(ctx, c.PreBuilds) +} + +// Building is a hook that will be called before a container image is built. +func (c ContainerLifecycleHooks) Built(ctx context.Context) func(req ContainerRequest) error { + return containerRequestHook(ctx, c.PostBuilds) +} + // Creating is a hook that will be called before a container is created. func (c ContainerLifecycleHooks) Creating(ctx context.Context) func(req ContainerRequest) error { + return containerRequestHook(ctx, c.PreCreates) +} + +// containerRequestHook returns a function that will iterate over all +// the hooks and call them one by one until there is an error. +func containerRequestHook(ctx context.Context, hooks []ContainerRequestHook) func(req ContainerRequest) error { return func(req ContainerRequest) error { - for _, hook := range c.PreCreates { + for _, hook := range hooks { if err := hook(ctx, req); err != nil { return err } @@ -411,9 +467,11 @@ func (c ContainerLifecycleHooks) Creating(ctx context.Context) func(req Containe // container lifecycle hooks. The created function will iterate over all the hooks and call them one by one. func containerHookFn(ctx context.Context, containerHook []ContainerHook) func(container Container) error { return func(ctr Container) error { - errs := make([]error, len(containerHook)) - for i, hook := range containerHook { - errs[i] = hook(ctx, ctr) + var errs []error + for _, hook := range containerHook { + if err := hook(ctx, ctr); err != nil { + errs = append(errs, err) + } } return errors.Join(errs...) @@ -532,65 +590,50 @@ func (p *DockerProvider) preCreateContainerHook(ctx context.Context, req Contain return nil } -// combineContainerHooks it returns just one ContainerLifecycle hook, as the result of combining -// the default hooks with the user-defined hooks. The function will loop over all the default hooks, -// storing each of the hooks in a slice, and then it will loop over all the user-defined hooks, -// appending or prepending them to the slice of hooks. 
The order of hooks is the following: -// - for Pre-hooks, always run the default hooks first, then append the user-defined hooks -// - for Post-hooks, always run the user-defined hooks first, then the default hooks +// combineContainerHooks returns a ContainerLifecycle hook as the result +// of combining the default hooks with the user-defined hooks. +// +// The order of hooks is the following: +// - Pre-hooks run the default hooks first then the user-defined hooks +// - Post-hooks run the user-defined hooks first then the default hooks func combineContainerHooks(defaultHooks, userDefinedHooks []ContainerLifecycleHooks) ContainerLifecycleHooks { - preCreates := []ContainerRequestHook{} - postCreates := []ContainerHook{} - preStarts := []ContainerHook{} - postStarts := []ContainerHook{} - postReadies := []ContainerHook{} - preStops := []ContainerHook{} - postStops := []ContainerHook{} - preTerminates := []ContainerHook{} - postTerminates := []ContainerHook{} - + // We use reflection here to ensure that any new hooks are handled. + var hooks ContainerLifecycleHooks + hooksVal := reflect.ValueOf(&hooks).Elem() + hooksType := reflect.TypeOf(hooks) for _, defaultHook := range defaultHooks { - preCreates = append(preCreates, defaultHook.PreCreates...) - preStarts = append(preStarts, defaultHook.PreStarts...) - preStops = append(preStops, defaultHook.PreStops...) - preTerminates = append(preTerminates, defaultHook.PreTerminates...) + defaultVal := reflect.ValueOf(defaultHook) + for i := 0; i < hooksType.NumField(); i++ { + if strings.HasPrefix(hooksType.Field(i).Name, "Pre") { + field := hooksVal.Field(i) + field.Set(reflect.AppendSlice(field, defaultVal.Field(i))) + } + } } - // append the user-defined hooks after the default pre-hooks - // and because the post hooks are still empty, the user-defined post-hooks - // will be the first ones to be executed + // Append the user-defined hooks after the default pre-hooks + // and because the post hooks are still empty, the user-defined + // post-hooks will be the first ones to be executed. for _, userDefinedHook := range userDefinedHooks { - preCreates = append(preCreates, userDefinedHook.PreCreates...) - postCreates = append(postCreates, userDefinedHook.PostCreates...) - preStarts = append(preStarts, userDefinedHook.PreStarts...) - postStarts = append(postStarts, userDefinedHook.PostStarts...) - postReadies = append(postReadies, userDefinedHook.PostReadies...) - preStops = append(preStops, userDefinedHook.PreStops...) - postStops = append(postStops, userDefinedHook.PostStops...) - preTerminates = append(preTerminates, userDefinedHook.PreTerminates...) - postTerminates = append(postTerminates, userDefinedHook.PostTerminates...) + userVal := reflect.ValueOf(userDefinedHook) + for i := 0; i < hooksType.NumField(); i++ { + field := hooksVal.Field(i) + field.Set(reflect.AppendSlice(field, userVal.Field(i))) + } } - // finally, append the default post-hooks + // Finally, append the default post-hooks. for _, defaultHook := range defaultHooks { - postCreates = append(postCreates, defaultHook.PostCreates...) - postStarts = append(postStarts, defaultHook.PostStarts...) - postReadies = append(postReadies, defaultHook.PostReadies...) - postStops = append(postStops, defaultHook.PostStops...) - postTerminates = append(postTerminates, defaultHook.PostTerminates...) 
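The rewritten combineContainerHooks in this hunk uses reflection so that newly added Pre*/Post* hook slices (such as PreBuilds/PostBuilds) are merged without editing the function again. A self-contained sketch of the same pattern on a toy struct, with illustrative field names:

package main

import (
    "fmt"
    "reflect"
    "strings"
)

// hooks stands in for ContainerLifecycleHooks; only the merge pattern matters here.
type hooks struct {
    PreCreates  []string
    PostCreates []string
}

// mergeHooks appends the defaults' Pre* fields, then every user field, then the
// defaults' Post* fields - the ordering documented for combineContainerHooks.
func mergeHooks(defaults, user []hooks) hooks {
    var out hooks
    outVal := reflect.ValueOf(&out).Elem()
    typ := reflect.TypeOf(out)

    appendFields := func(src hooks, prefix string) {
        srcVal := reflect.ValueOf(src)
        for i := 0; i < typ.NumField(); i++ {
            if prefix == "" || strings.HasPrefix(typ.Field(i).Name, prefix) {
                field := outVal.Field(i)
                field.Set(reflect.AppendSlice(field, srcVal.Field(i)))
            }
        }
    }

    for _, d := range defaults {
        appendFields(d, "Pre")
    }
    for _, u := range user {
        appendFields(u, "") // user hooks, both pre and post, go in the middle
    }
    for _, d := range defaults {
        appendFields(d, "Post")
    }
    return out
}

func main() {
    merged := mergeHooks(
        []hooks{{PreCreates: []string{"default-pre"}, PostCreates: []string{"default-post"}}},
        []hooks{{PreCreates: []string{"user-pre"}, PostCreates: []string{"user-post"}}},
    )
    fmt.Println(merged.PreCreates)  // [default-pre user-pre]
    fmt.Println(merged.PostCreates) // [user-post default-post]
}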
+ defaultVal := reflect.ValueOf(defaultHook) + for i := 0; i < hooksType.NumField(); i++ { + if strings.HasPrefix(hooksType.Field(i).Name, "Post") { + field := hooksVal.Field(i) + field.Set(reflect.AppendSlice(field, defaultVal.Field(i))) + } + } } - return ContainerLifecycleHooks{ - PreCreates: preCreates, - PostCreates: postCreates, - PreStarts: preStarts, - PostStarts: postStarts, - PostReadies: postReadies, - PreStops: preStops, - PostStops: postStops, - PreTerminates: preTerminates, - PostTerminates: postTerminates, - } + return hooks } func mergePortBindings(configPortMap, exposedPortMap nat.PortMap, exposedPorts []string) nat.PortMap { diff --git a/vendor/github.com/testcontainers/testcontainers-go/logger.go b/vendor/github.com/testcontainers/testcontainers-go/logger.go index fca5da53..1a5ae5dc 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/logger.go +++ b/vendor/github.com/testcontainers/testcontainers-go/logger.go @@ -11,17 +11,18 @@ import ( ) // Logger is the default log instance -var Logger Logging = log.New(os.Stderr, "", log.LstdFlags) +var Logger Logging = &noopLogger{} func init() { - for _, arg := range os.Args { - if strings.EqualFold(arg, "-test.v=true") || strings.EqualFold(arg, "-v") { - return + // Enable default logger in the testing with a verbose flag. + if testing.Testing() { + // Parse manually because testing.Verbose() panics unless flag.Parse() has done. + for _, arg := range os.Args { + if strings.EqualFold(arg, "-test.v=true") || strings.EqualFold(arg, "-v") { + Logger = log.New(os.Stderr, "", log.LstdFlags) + } } } - - // If we are not running in verbose mode, we configure a noop logger by default. - Logger = &noopLogger{} } // Validate our types implement the required interfaces. diff --git a/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml b/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml index 2d80a5b4..a5dc8317 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml +++ b/vendor/github.com/testcontainers/testcontainers-go/mkdocs.yml @@ -64,6 +64,8 @@ nav: - Log: features/wait/log.md - Multi: features/wait/multi.md - SQL: features/wait/sql.md + - TLS: features/wait/tls.md + - Walk: features/wait/walk.md - Modules: - modules/index.md - modules/artemis.md @@ -134,10 +136,8 @@ nav: - system_requirements/using_colima.md - system_requirements/using_podman.md - system_requirements/rancher.md - - Contributing: - - contributing.md - - contributing_docs.md + - Contributing: contributing.md - Getting help: getting_help.md edit_uri: edit/main/docs/ extra: - latest_version: v0.34.0 + latest_version: v0.35.0 diff --git a/vendor/github.com/testcontainers/testcontainers-go/modules/postgres/postgres.go b/vendor/github.com/testcontainers/testcontainers-go/modules/postgres/postgres.go index ac51721f..e25bc667 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/modules/postgres/postgres.go +++ b/vendor/github.com/testcontainers/testcontainers-go/modules/postgres/postgres.go @@ -3,6 +3,8 @@ package postgres import ( "context" "database/sql" + _ "embed" + "errors" "fmt" "io" "net" @@ -18,6 +20,9 @@ const ( defaultSnapshotName = "migrated_template" ) +//go:embed resources/customEntrypoint.sh +var embeddedCustomEntrypoint string + // PostgresContainer represents the postgres container type used in the module type PostgresContainer struct { testcontainers.Container @@ -136,7 +141,7 @@ func WithUsername(user string) testcontainers.CustomizeRequestOption { // Deprecated: use Run instead // RunContainer 
creates an instance of the Postgres container type func RunContainer(ctx context.Context, opts ...testcontainers.ContainerCustomizer) (*PostgresContainer, error) { - return Run(ctx, "docker.io/postgres:16-alpine", opts...) + return Run(ctx, "postgres:16-alpine", opts...) } // Run creates an instance of the Postgres container type @@ -204,6 +209,43 @@ func WithSnapshotName(name string) SnapshotOption { } } +// WithSSLSettings configures the Postgres server to run with the provided CA Chain +// This will not function if the corresponding postgres conf is not correctly configured. +// Namely the paths below must match what is set in the conf file +func WithSSLCert(caCertFile string, certFile string, keyFile string) testcontainers.CustomizeRequestOption { + const defaultPermission = 0o600 + + return func(req *testcontainers.GenericContainerRequest) error { + const entrypointPath = "/usr/local/bin/docker-entrypoint-ssl.bash" + + req.Files = append(req.Files, + testcontainers.ContainerFile{ + HostFilePath: caCertFile, + ContainerFilePath: "/tmp/testcontainers-go/postgres/ca_cert.pem", + FileMode: defaultPermission, + }, + testcontainers.ContainerFile{ + HostFilePath: certFile, + ContainerFilePath: "/tmp/testcontainers-go/postgres/server.cert", + FileMode: defaultPermission, + }, + testcontainers.ContainerFile{ + HostFilePath: keyFile, + ContainerFilePath: "/tmp/testcontainers-go/postgres/server.key", + FileMode: defaultPermission, + }, + testcontainers.ContainerFile{ + Reader: strings.NewReader(embeddedCustomEntrypoint), + ContainerFilePath: entrypointPath, + FileMode: defaultPermission, + }, + ) + req.Entrypoint = []string{"sh", entrypointPath} + + return nil + } +} + // Snapshot takes a snapshot of the current state of the database as a template, which can then be restored using // the Restore method. By default, the snapshot will be created under a database called migrated_template, you can // customize the snapshot name with the options. 
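A usage sketch for the new WithSSLCert option. The testdata paths and the SSL-enabled postgresql.conf are assumptions; as the option's comment notes, the server configuration must point at the container paths the option copies the material to (/tmp/testcontainers-go/postgres/...).

package example_test

import (
    "context"
    "testing"

    "github.com/testcontainers/testcontainers-go"
    "github.com/testcontainers/testcontainers-go/modules/postgres"
    "github.com/testcontainers/testcontainers-go/wait"
)

func TestPostgresWithSSLCert(t *testing.T) {
    ctx := context.Background()

    ctr, err := postgres.Run(ctx, "postgres:16-alpine",
        // Assumed config file enabling ssl and referencing
        // /tmp/testcontainers-go/postgres/server.cert, server.key and ca_cert.pem.
        postgres.WithConfigFile("./testdata/postgresql-ssl.conf"),
        postgres.WithSSLCert(
            "./testdata/ca.pem",      // CA certificate (assumed path)
            "./testdata/server.cert", // server certificate (assumed path)
            "./testdata/server.key",  // server key (assumed path)
        ),
        postgres.WithDatabase("app"),
        postgres.WithUsername("app"),
        postgres.WithPassword("secret"),
        testcontainers.WithWaitStrategy(
            wait.ForLog("database system is ready to accept connections").WithOccurrence(2),
        ),
    )
    testcontainers.CleanupContainer(t, ctr)
    if err != nil {
        t.Fatal(err)
    }
}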
@@ -262,7 +304,7 @@ func (c *PostgresContainer) checkSnapshotConfig(opts []SnapshotOption) (string, } if c.dbName == "postgres" { - return "", fmt.Errorf("cannot restore the postgres system database as it cannot be dropped to be restored") + return "", errors.New("cannot restore the postgres system database as it cannot be dropped to be restored") } return snapshotName, nil } diff --git a/vendor/github.com/testcontainers/testcontainers-go/modules/postgres/resources/customEntrypoint.sh b/vendor/github.com/testcontainers/testcontainers-go/modules/postgres/resources/customEntrypoint.sh new file mode 100644 index 00000000..ff4ffa42 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/modules/postgres/resources/customEntrypoint.sh @@ -0,0 +1,25 @@ +#!/usr/bin/env bash +set -Eeo pipefail + + +pUID=$(id -u postgres) +pGID=$(id -g postgres) + +if [ -z "$pUID" ] +then + echo "Unable to find postgres user id, required in order to chown key material" + exit 1 +fi + +if [ -z "$pGID" ] +then + echo "Unable to find postgres group id, required in order to chown key material" + exit 1 +fi + +chown "$pUID":"$pGID" \ + /tmp/testcontainers-go/postgres/ca_cert.pem \ + /tmp/testcontainers-go/postgres/server.cert \ + /tmp/testcontainers-go/postgres/server.key + +/usr/local/bin/docker-entrypoint.sh "$@" diff --git a/vendor/github.com/testcontainers/testcontainers-go/network.go b/vendor/github.com/testcontainers/testcontainers-go/network.go index 9544bee1..e0cc83f5 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/network.go +++ b/vendor/github.com/testcontainers/testcontainers-go/network.go @@ -4,6 +4,8 @@ import ( "context" "github.com/docker/docker/api/types/network" + + "github.com/testcontainers/testcontainers-go/internal/core" ) // NetworkProvider allows the creation of networks on an arbitrary system @@ -23,12 +25,12 @@ type DefaultNetwork string // Deprecated: will be removed in the future. func (n DefaultNetwork) ApplyGenericTo(opts *GenericProviderOptions) { - opts.DefaultNetwork = string(n) + opts.defaultNetwork = string(n) } // Deprecated: will be removed in the future. func (n DefaultNetwork) ApplyDockerTo(opts *DockerProviderOptions) { - opts.DefaultNetwork = string(n) + opts.defaultNetwork = string(n) } // Deprecated: will be removed in the future @@ -47,3 +49,12 @@ type NetworkRequest struct { ReaperImage string // Deprecated: use WithImageName ContainerOption instead. Alternative reaper registry ReaperOptions []ContainerOption // Deprecated: the reaper is configured at the properties level, for an entire test session } + +// sessionID returns the session ID for the network request. +func (r NetworkRequest) sessionID() string { + if sessionID := r.Labels[core.LabelSessionID]; sessionID != "" { + return sessionID + } + + return core.SessionID() +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/options.go b/vendor/github.com/testcontainers/testcontainers-go/options.go index 2849b156..4eb39058 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/options.go +++ b/vendor/github.com/testcontainers/testcontainers-go/options.go @@ -186,7 +186,7 @@ func (p prependHubRegistry) Description() string { // - if the prefix is empty, the image is returned as is. // - if the image is a non-hub image (e.g. where another registry is set), the image is returned as is. // - if the image is a Docker Hub image where the hub registry is explicitly part of the name -// (i.e. 
anything with a docker.io or registry.hub.docker.com host part), the image is returned as is. +// (i.e. anything with a registry.hub.docker.com host part), the image is returned as is. func (p prependHubRegistry) Substitute(image string) (string, error) { registry := core.ExtractRegistry(image, "") diff --git a/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go b/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go index 88f14f2d..3411ff0c 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go +++ b/vendor/github.com/testcontainers/testcontainers-go/port_forwarding.go @@ -6,6 +6,7 @@ import ( "fmt" "io" "net" + "sync" "time" "github.com/docker/docker/api/types/container" @@ -38,9 +39,9 @@ var sshPassword = uuid.NewString() // 1. Create a new SSHD container. // 2. Expose the host ports to the container after the container is ready. // 3. Close the SSH sessions before killing the container. -func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) (sshdConnectHook ContainerLifecycleHooks, err error) { //nolint:nonamedreturns // Required for error check. +func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) (sshdConnectHook ContainerLifecycleHooks, err error) { if len(ports) == 0 { - return sshdConnectHook, fmt.Errorf("no ports to expose") + return sshdConnectHook, errors.New("no ports to expose") } // Use the first network of the container to connect to the SSHD container. @@ -99,10 +100,26 @@ func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) ( return sshdConnectHook, fmt.Errorf("new sshd container: %w", err) } - // IP in the first network of the container - sshdIP, err := sshdContainer.ContainerIP(context.Background()) + // IP in the first network of the container. 
+ inspect, err := sshdContainer.Inspect(ctx) if err != nil { - return sshdConnectHook, fmt.Errorf("get sshd container IP: %w", err) + return sshdConnectHook, fmt.Errorf("inspect sshd container: %w", err) + } + + // TODO: remove once we have docker context support via #2810 + sshdIP := inspect.NetworkSettings.IPAddress + if sshdIP == "" { + single := len(inspect.NetworkSettings.Networks) == 1 + for name, network := range inspect.NetworkSettings.Networks { + if name == sshdFirstNetwork || single { + sshdIP = network.IPAddress + break + } + } + } + + if sshdIP == "" { + return sshdConnectHook, errors.New("sshd container IP not found") } if req.HostConfigModifier == nil { @@ -166,11 +183,10 @@ func exposeHostPorts(ctx context.Context, req *ContainerRequest, ports ...int) ( func newSshdContainer(ctx context.Context, opts ...ContainerCustomizer) (*sshdContainer, error) { req := GenericContainerRequest{ ContainerRequest: ContainerRequest{ - Image: sshdImage, - HostAccessPorts: []int{}, // empty list because it does not need any port - ExposedPorts: []string{sshPort}, - Env: map[string]string{"PASSWORD": sshPassword}, - WaitingFor: wait.ForListeningPort(sshPort), + Image: sshdImage, + ExposedPorts: []string{sshPort}, + Env: map[string]string{"PASSWORD": sshPassword}, + WaitingFor: wait.ForListeningPort(sshPort), }, Started: true, } @@ -191,183 +207,221 @@ func newSshdContainer(ctx context.Context, opts ...ContainerCustomizer) (*sshdCo return sshd, fmt.Errorf("generic container: %w", err) } - sshClientConfig, err := configureSSHConfig(ctx, sshd) - if err != nil { - // return the container and the error to the caller to handle it + if err = sshd.clientConfig(ctx); err != nil { + // Return the container and the error to the caller to handle it. return sshd, err } - sshd.sshConfig = sshClientConfig - return sshd, nil } // sshdContainer represents the SSHD container type used for the port forwarding container. -// It's an internal type that extends the DockerContainer type, to add the SSH tunneling capabilities. +// It's an internal type that extends the DockerContainer type, to add the SSH tunnelling capabilities. type sshdContainer struct { Container port string sshConfig *ssh.ClientConfig - portForwarders []PortForwarder + portForwarders []*portForwarder } // Terminate stops the container and closes the SSH session -func (sshdC *sshdContainer) Terminate(ctx context.Context) error { - sshdC.closePorts(ctx) - - return sshdC.Container.Terminate(ctx) +func (sshdC *sshdContainer) Terminate(ctx context.Context, opts ...TerminateOption) error { + return errors.Join( + sshdC.closePorts(), + sshdC.Container.Terminate(ctx, opts...), + ) } // Stop stops the container and closes the SSH session func (sshdC *sshdContainer) Stop(ctx context.Context, timeout *time.Duration) error { - sshdC.closePorts(ctx) - - return sshdC.Container.Stop(ctx, timeout) + return errors.Join( + sshdC.closePorts(), + sshdC.Container.Stop(ctx, timeout), + ) } // closePorts closes all port forwarders. -func (sshdC *sshdContainer) closePorts(ctx context.Context) { +func (sshdC *sshdContainer) closePorts() error { + var errs []error for _, pfw := range sshdC.portForwarders { - pfw.Close(ctx) + if err := pfw.Close(); err != nil { + errs = append(errs, err) + } } sshdC.portForwarders = nil // Ensure the port forwarders are not used after closing. + return errors.Join(errs...) } -func configureSSHConfig(ctx context.Context, sshdC *sshdContainer) (*ssh.ClientConfig, error) { +// clientConfig sets up the the SSHD client configuration. 
+func (sshdC *sshdContainer) clientConfig(ctx context.Context) error { mappedPort, err := sshdC.MappedPort(ctx, sshPort) if err != nil { - return nil, fmt.Errorf("mapped port: %w", err) + return fmt.Errorf("mapped port: %w", err) } - sshdC.port = mappedPort.Port() - sshConfig := ssh.ClientConfig{ + sshdC.port = mappedPort.Port() + sshdC.sshConfig = &ssh.ClientConfig{ User: user, HostKeyCallback: ssh.InsecureIgnoreHostKey(), Auth: []ssh.AuthMethod{ssh.Password(sshPassword)}, - Timeout: 30 * time.Second, } - return &sshConfig, nil + return nil } -func (sshdC *sshdContainer) exposeHostPort(ctx context.Context, ports ...int) error { +// exposeHostPort exposes the host ports to the container. +func (sshdC *sshdContainer) exposeHostPort(ctx context.Context, ports ...int) (err error) { + defer func() { + if err != nil { + err = errors.Join(err, sshdC.closePorts()) + } + }() for _, port := range ports { - pw := NewPortForwarder(fmt.Sprintf("localhost:%s", sshdC.port), sshdC.sshConfig, port, port) - sshdC.portForwarders = append(sshdC.portForwarders, *pw) - - go pw.Forward(ctx) //nolint:errcheck // Nothing we can usefully do with the error - } - - var err error + pf, err := newPortForwarder(ctx, "localhost:"+sshdC.port, sshdC.sshConfig, port) + if err != nil { + return fmt.Errorf("new port forwarder: %w", err) + } - // continue when all port forwarders have created the connection - for _, pfw := range sshdC.portForwarders { - err = errors.Join(err, <-pfw.connectionCreated) + sshdC.portForwarders = append(sshdC.portForwarders, pf) } - return err + return nil } -type PortForwarder struct { - sshDAddr string - sshConfig *ssh.ClientConfig - remotePort int - localPort int - connectionCreated chan error // used to signal that the connection has been created, so the caller can proceed - terminateChan chan struct{} // used to signal that the connection has been terminated +// portForwarder forwards a port from the container to the host. +type portForwarder struct { + client *ssh.Client + listener net.Listener + dialTimeout time.Duration + localAddr string + ctx context.Context + cancel context.CancelFunc + + // closeMtx protects the close operation + closeMtx sync.Mutex + closeErr error } -func NewPortForwarder(sshDAddr string, sshConfig *ssh.ClientConfig, remotePort, localPort int) *PortForwarder { - return &PortForwarder{ - sshDAddr: sshDAddr, - sshConfig: sshConfig, - remotePort: remotePort, - localPort: localPort, - connectionCreated: make(chan error), - terminateChan: make(chan struct{}), +// newPortForwarder creates a new running portForwarder for the given port. +// The context is only used for the initial SSH connection. +func newPortForwarder(ctx context.Context, sshDAddr string, sshConfig *ssh.ClientConfig, port int) (pf *portForwarder, err error) { + var d net.Dialer + conn, err := d.DialContext(ctx, "tcp", sshDAddr) + if err != nil { + return nil, fmt.Errorf("ssh dial: %w", err) } -} -func (pf *PortForwarder) Close(ctx context.Context) { - close(pf.terminateChan) - close(pf.connectionCreated) -} + // Ensure the connection is closed in case of error. 
+ defer func() { + if err != nil { + err = errors.Join(err, conn.Close()) + } + }() -func (pf *PortForwarder) Forward(ctx context.Context) error { - client, err := ssh.Dial("tcp", pf.sshDAddr, pf.sshConfig) + c, chans, reqs, err := ssh.NewClientConn(conn, sshDAddr, sshConfig) if err != nil { - err = fmt.Errorf("error dialing ssh server: %w", err) - pf.connectionCreated <- err - return err + return nil, fmt.Errorf("ssh new client conn: %w", err) } - defer client.Close() - listener, err := client.Listen("tcp", fmt.Sprintf("localhost:%d", pf.remotePort)) + client := ssh.NewClient(c, chans, reqs) + + listener, err := client.Listen("tcp", fmt.Sprintf("localhost:%d", port)) if err != nil { - err = fmt.Errorf("error listening on remote port: %w", err) - pf.connectionCreated <- err - return err + return nil, fmt.Errorf("listening on remote port %d: %w", port, err) + } + + ctx, cancel := context.WithCancel(context.Background()) + + pf = &portForwarder{ + client: client, + listener: listener, + localAddr: fmt.Sprintf("localhost:%d", port), + ctx: ctx, + cancel: cancel, + dialTimeout: time.Second * 2, } - defer listener.Close() - // signal that the connection has been created - pf.connectionCreated <- nil + go pf.run() + + return pf, nil +} + +// Close closes the port forwarder. +func (pf *portForwarder) Close() error { + pf.closeMtx.Lock() + defer pf.closeMtx.Unlock() - // check if the context or the terminateChan has been closed select { - case <-ctx.Done(): - if err := listener.Close(); err != nil { - return fmt.Errorf("error closing listener: %w", err) - } - if err := client.Close(); err != nil { - return fmt.Errorf("error closing client: %w", err) - } - return nil - case <-pf.terminateChan: - if err := listener.Close(); err != nil { - return fmt.Errorf("error closing listener: %w", err) - } - if err := client.Close(); err != nil { - return fmt.Errorf("error closing client: %w", err) - } - return nil + case <-pf.ctx.Done(): + // Already closed. + return pf.closeErr default: } + var errs []error + if err := pf.listener.Close(); err != nil { + errs = append(errs, fmt.Errorf("close listener: %w", err)) + } + if err := pf.client.Close(); err != nil { + errs = append(errs, fmt.Errorf("close client: %w", err)) + } + + pf.closeErr = errors.Join(errs...) + pf.cancel() + + return pf.closeErr +} + +// run forwards the port from the remote connection to the local connection. +func (pf *portForwarder) run() { for { - remote, err := listener.Accept() + remote, err := pf.listener.Accept() if err != nil { - return fmt.Errorf("error accepting connection: %w", err) + if errors.Is(err, io.EOF) { + // The listener has been closed. + return + } + + // Ignore errors as they are transient and we want requests to + // continue to be accepted. + continue } - go pf.runTunnel(ctx, remote) + go pf.tunnel(remote) } } -// runTunnel runs a tunnel between two connections; as soon as one connection -// reaches EOF or reports an error, both connections are closed and this -// function returns. -func (pf *PortForwarder) runTunnel(ctx context.Context, remote net.Conn) { +// tunnel runs a tunnel between two connections; as soon as the forwarder +// context is cancelled or one connection copies returns, irrespective of +// the error, both connections are closed. 
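The forwarder above is driven from user code by ContainerRequest.HostAccessPorts: each listed host port becomes reachable from inside the container through the SSHD tunnel. A sketch, assuming a service is already listening on host port 8080; the image, command and the host.testcontainers.internal alias are illustrative of the usual setup rather than taken from this hunk.

package example_test

import (
    "context"
    "testing"

    "github.com/testcontainers/testcontainers-go"
    "github.com/testcontainers/testcontainers-go/wait"
)

func TestHostPortAccess(t *testing.T) {
    ctx := context.Background()

    ctr, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
        ContainerRequest: testcontainers.ContainerRequest{
            Image:           "alpine:3.20",
            Cmd:             []string{"sleep", "3600"},
            HostAccessPorts: []int{8080}, // host ports tunnelled into the container
            WaitingFor:      wait.ForExec([]string{"true"}),
        },
        Started: true,
    })
    testcontainers.CleanupContainer(t, ctr)
    if err != nil {
        t.Fatal(err)
    }

    // Inside the container the host service is addressed via the forwarder's
    // internal hostname.
    code, _, err := ctr.Exec(ctx, []string{"wget", "-q", "-O", "-", "http://host.testcontainers.internal:8080"})
    if err != nil || code != 0 {
        t.Fatalf("calling back to the host failed: code=%d err=%v", code, err)
    }
}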
+func (pf *portForwarder) tunnel(remote net.Conn) { + defer remote.Close() + + ctx, cancel := context.WithTimeout(pf.ctx, pf.dialTimeout) + defer cancel() + var dialer net.Dialer - local, err := dialer.DialContext(ctx, "tcp", fmt.Sprintf("localhost:%d", pf.localPort)) + local, err := dialer.DialContext(ctx, "tcp", pf.localAddr) if err != nil { - remote.Close() + // Nothing we can do with the error. return } defer local.Close() - defer remote.Close() - done := make(chan struct{}, 2) + ctx, cancel = context.WithCancel(pf.ctx) go func() { - io.Copy(local, remote) //nolint:errcheck // Nothing we can usefully do with the error - done <- struct{}{} + defer cancel() + io.Copy(local, remote) //nolint:errcheck // Nothing useful we can do with the error. }() go func() { - io.Copy(remote, local) //nolint:errcheck // Nothing we can usefully do with the error - done <- struct{}{} + defer cancel() + io.Copy(remote, local) //nolint:errcheck // Nothing useful we can do with the error. }() - <-done + // Wait for the context to be done before returning which triggers + // both connections to close. This is done to to prevent the copies + // blocking forever on unused connections. + <-ctx.Done() } diff --git a/vendor/github.com/testcontainers/testcontainers-go/provider.go b/vendor/github.com/testcontainers/testcontainers-go/provider.go index b5e5ffa9..31714c0c 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/provider.go +++ b/vendor/github.com/testcontainers/testcontainers-go/provider.go @@ -25,7 +25,7 @@ type ( // GenericProviderOptions defines options applicable to all providers GenericProviderOptions struct { Logger Logging - DefaultNetwork string + defaultNetwork string } // GenericProviderOption defines a common interface to modify GenericProviderOptions diff --git a/vendor/github.com/testcontainers/testcontainers-go/reaper.go b/vendor/github.com/testcontainers/testcontainers-go/reaper.go index 8f2bde8a..1d97a36f 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/reaper.go +++ b/vendor/github.com/testcontainers/testcontainers-go/reaper.go @@ -83,7 +83,7 @@ func NewReaper(ctx context.Context, sessionID string, provider ReaperProvider, r func reaperContainerNameFromSessionID(sessionID string) string { // The session id is 64 characters, so we will not hit the limit of 128 // characters for container names. - return fmt.Sprintf("reaper_%s", sessionID) + return "reaper_" + sessionID } // reaperSpawner is a singleton that manages the reaper container. @@ -161,6 +161,13 @@ func (r *reaperSpawner) lookupContainer(ctx context.Context, sessionID string) ( } defer dockerClient.Close() + provider, err := NewDockerProvider() + if err != nil { + return nil, fmt.Errorf("new provider: %w", err) + } + + provider.SetClient(dockerClient) + opts := container.ListOptions{ All: true, Filters: filters.NewArgs( @@ -184,11 +191,10 @@ func (r *reaperSpawner) lookupContainer(ctx context.Context, sessionID string) ( } if len(resp) > 1 { - return nil, fmt.Errorf("multiple reaper containers found for session ID %s", sessionID) + return nil, fmt.Errorf("found %d reaper containers for session ID %q", len(resp), sessionID) } - container := resp[0] - r, err := containerFromDockerResponse(ctx, container) + r, err := provider.ContainerFromType(ctx, resp[0]) if err != nil { return nil, fmt.Errorf("from docker: %w", err) } @@ -271,7 +277,7 @@ func (r *reaperSpawner) reaper(ctx context.Context, sessionID string, provider R // If connect is true, the reaper will be connected to the reaper container. 
// It must be called with the lock held. func (r *reaperSpawner) retryLocked(ctx context.Context, sessionID string, provider ReaperProvider) func() (*Reaper, error) { - return func() (reaper *Reaper, err error) { //nolint:nonamedreturns // Needed for deferred error check. + return func() (reaper *Reaper, err error) { reaper, err = r.reuseOrCreate(ctx, sessionID, provider) // Ensure that the reaper is terminated if an error occurred. defer func() { @@ -366,7 +372,7 @@ func (r *reaperSpawner) fromContainer(ctx context.Context, sessionID string, pro // newReaper creates a connected Reaper with a sessionID to identify containers // and a provider to use. -func (r *reaperSpawner) newReaper(ctx context.Context, sessionID string, provider ReaperProvider) (reaper *Reaper, err error) { //nolint:nonamedreturns // Needed for deferred error check. +func (r *reaperSpawner) newReaper(ctx context.Context, sessionID string, provider ReaperProvider) (reaper *Reaper, err error) { dockerHostMount := core.MustExtractDockerSocket(ctx) port := r.port() @@ -402,7 +408,12 @@ func (r *reaperSpawner) newReaper(ctx context.Context, sessionID string, provide // Attach reaper container to a requested network if it is specified if p, ok := provider.(*DockerProvider); ok { - req.Networks = append(req.Networks, p.DefaultNetwork) + defaultNetwork, err := p.ensureDefaultNetwork(ctx) + if err != nil { + return nil, fmt.Errorf("ensure default network: %w", err) + } + + req.Networks = append(req.Networks, defaultNetwork) } c, err := provider.RunContainer(ctx, req) @@ -561,3 +572,8 @@ func (r *Reaper) handshake(conn net.Conn) error { func (r *Reaper) Labels() map[string]string { return GenericLabels() } + +// isReaperImage returns true if the image name is the reaper image. +func isReaperImage(name string) bool { + return strings.HasSuffix(name, config.ReaperDefaultImage) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/requirements.txt b/vendor/github.com/testcontainers/testcontainers-go/requirements.txt index 83689b0f..e4db8827 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/requirements.txt +++ b/vendor/github.com/testcontainers/testcontainers-go/requirements.txt @@ -1,5 +1,5 @@ mkdocs==1.5.3 mkdocs-codeinclude-plugin==0.2.1 -mkdocs-include-markdown-plugin==6.0.4 +mkdocs-include-markdown-plugin==6.2.2 mkdocs-material==9.5.18 -mkdocs-markdownextradata-plugin==0.2.5 +mkdocs-markdownextradata-plugin==0.2.6 diff --git a/vendor/github.com/testcontainers/testcontainers-go/sonar-project.properties b/vendor/github.com/testcontainers/testcontainers-go/sonar-project.properties index 67ef15fc..977327f7 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/sonar-project.properties +++ b/vendor/github.com/testcontainers/testcontainers-go/sonar-project.properties @@ -7,7 +7,7 @@ sonar.projectKey=testcontainers_testcontainers-go sonar.projectName=testcontainers-go -sonar.projectVersion=v0.34.0 +sonar.projectVersion=v0.35.0 sonar.sources=. 
diff --git a/vendor/github.com/testcontainers/testcontainers-go/testing.go b/vendor/github.com/testcontainers/testcontainers-go/testing.go index 0601d9fa..8502f018 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/testing.go +++ b/vendor/github.com/testcontainers/testcontainers-go/testing.go @@ -3,6 +3,7 @@ package testcontainers import ( "context" "fmt" + "io" "regexp" "testing" @@ -36,14 +37,10 @@ func SkipIfProviderIsNotHealthy(t *testing.T) { func SkipIfDockerDesktop(t *testing.T, ctx context.Context) { t.Helper() cli, err := NewDockerClientWithOpts(ctx) - if err != nil { - t.Fatalf("failed to create docker client: %s", err) - } + require.NoErrorf(t, err, "failed to create docker client: %s", err) info, err := cli.Info(ctx) - if err != nil { - t.Fatalf("failed to get docker info: %s", err) - } + require.NoErrorf(t, err, "failed to get docker info: %s", err) if info.OperatingSystem == "Docker Desktop" { t.Skip("Skipping test that requires host network access when running in Docker Desktop") @@ -86,7 +83,9 @@ func CleanupNetwork(tb testing.TB, network Network) { tb.Helper() tb.Cleanup(func() { - noErrorOrIgnored(tb, network.Remove(context.Background())) + if !isNil(network) { + noErrorOrIgnored(tb, network.Remove(context.Background())) + } }) } @@ -151,3 +150,18 @@ func isCleanupSafe(err error) bool { return false } } + +// RequireContainerExec is a helper function that executes a command in a container +// It insures that there is no error during the execution +// Finally returns the output of its execution +func RequireContainerExec(ctx context.Context, t *testing.T, container Container, cmd []string) string { + t.Helper() + + code, out, err := container.Exec(ctx, cmd) + require.NoError(t, err) + require.Zero(t, code) + + checkBytes, err := io.ReadAll(out) + require.NoError(t, err) + return string(checkBytes) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/all.go b/vendor/github.com/testcontainers/testcontainers-go/wait/all.go index fb097fb5..fb7eb4e5 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/all.go +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/all.go @@ -2,7 +2,7 @@ package wait import ( "context" - "fmt" + "errors" "time" ) @@ -58,7 +58,7 @@ func (ms *MultiStrategy) WaitUntilReady(ctx context.Context, target StrategyTarg } if len(ms.Strategies) == 0 { - return fmt.Errorf("no wait strategy supplied") + return errors.New("no wait strategy supplied") } for _, strategy := range ms.Strategies { diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go index 9360517a..7d8b9e76 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/host_port.go @@ -126,7 +126,7 @@ func (hp *HostPortStrategy) WaitUntilReady(ctx context.Context, target StrategyT } if internalPort == "" { - return fmt.Errorf("no port to wait for") + return errors.New("no port to wait for") } var port nat.Port diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/log.go b/vendor/github.com/testcontainers/testcontainers-go/wait/log.go index 530077f9..41c96e3e 100644 --- a/vendor/github.com/testcontainers/testcontainers-go/wait/log.go +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/log.go @@ -1,10 +1,12 @@ package wait import ( + "bytes" "context" + "errors" + "fmt" "io" "regexp" - "strings" "time" ) @@ -14,6 +16,21 @@ var ( _ 
StrategyTimeout = (*LogStrategy)(nil) ) +// PermanentError is a special error that will stop the wait and return an error. +type PermanentError struct { + err error +} + +// Error implements the error interface. +func (e *PermanentError) Error() string { + return e.err.Error() +} + +// NewPermanentError creates a new PermanentError. +func NewPermanentError(err error) *PermanentError { + return &PermanentError{err: err} +} + // LogStrategy will wait until a given log entry shows up in the docker logs type LogStrategy struct { // all Strategies should have a startupTimeout to avoid waiting infinitely @@ -24,6 +41,18 @@ type LogStrategy struct { IsRegexp bool Occurrence int PollInterval time.Duration + + // check is the function that will be called to check if the log entry is present. + check func([]byte) error + + // submatchCallback is a callback that will be called with the sub matches of the regexp. + submatchCallback func(pattern string, matches [][][]byte) error + + // re is the optional compiled regexp. + re *regexp.Regexp + + // log byte slice version of [LogStrategy.Log] used for count checks. + log []byte } // NewLogStrategy constructs with polling interval of 100 milliseconds and startup timeout of 60 seconds by default @@ -46,6 +75,18 @@ func (ws *LogStrategy) AsRegexp() *LogStrategy { return ws } +// Submatch configures a function that will be called with the result of +// [regexp.Regexp.FindAllSubmatch], allowing the caller to process the results. +// If the callback returns nil, the strategy will be considered successful. +// Returning a [PermanentError] will stop the wait and return an error, otherwise +// it will retry until the timeout is reached. +// [LogStrategy.Occurrence] is ignored if this option is set. +func (ws *LogStrategy) Submatch(callback func(pattern string, matches [][][]byte) error) *LogStrategy { + ws.submatchCallback = callback + + return ws +} + // WithStartupTimeout can be used to change the default startup timeout func (ws *LogStrategy) WithStartupTimeout(timeout time.Duration) *LogStrategy { ws.timeout = &timeout @@ -89,57 +130,85 @@ func (ws *LogStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget timeout = *ws.timeout } + switch { + case ws.submatchCallback != nil: + ws.re = regexp.MustCompile(ws.Log) + ws.check = ws.checkSubmatch + case ws.IsRegexp: + ws.re = regexp.MustCompile(ws.Log) + ws.check = ws.checkRegexp + default: + ws.log = []byte(ws.Log) + ws.check = ws.checkCount + } + ctx, cancel := context.WithTimeout(ctx, timeout) defer cancel() - length := 0 - -LOOP: + var lastLen int + var lastError error for { select { case <-ctx.Done(): - return ctx.Err() + return errors.Join(lastError, ctx.Err()) default: checkErr := checkTarget(ctx, target) reader, err := target.Logs(ctx) if err != nil { + // TODO: fix as this will wait for timeout if the logs are not available. time.Sleep(ws.PollInterval) continue } b, err := io.ReadAll(reader) if err != nil { + // TODO: fix as this will wait for timeout if the logs are not readable. time.Sleep(ws.PollInterval) continue } - logs := string(b) - - switch { - case length == len(logs) && checkErr != nil: + if lastLen == len(b) && checkErr != nil { + // Log length hasn't changed so we're not making progress. 
return checkErr - case checkLogsFn(ws, b): - break LOOP - default: - length = len(logs) + } + + if err := ws.check(b); err != nil { + var errPermanent *PermanentError + if errors.As(err, &errPermanent) { + return err + } + + lastError = err + lastLen = len(b) time.Sleep(ws.PollInterval) continue } + + return nil } } +} + +// checkCount checks if the log entry is present in the logs using a string count. +func (ws *LogStrategy) checkCount(b []byte) error { + if count := bytes.Count(b, ws.log); count < ws.Occurrence { + return fmt.Errorf("%q matched %d times, expected %d", ws.Log, count, ws.Occurrence) + } return nil } -func checkLogsFn(ws *LogStrategy, b []byte) bool { - if ws.IsRegexp { - re := regexp.MustCompile(ws.Log) - occurrences := re.FindAll(b, -1) - - return len(occurrences) >= ws.Occurrence +// checkRegexp checks if the log entry is present in the logs using a regexp count. +func (ws *LogStrategy) checkRegexp(b []byte) error { + if matches := ws.re.FindAll(b, -1); len(matches) < ws.Occurrence { + return fmt.Errorf("`%s` matched %d times, expected %d", ws.Log, len(matches), ws.Occurrence) } - logs := string(b) - return strings.Count(logs, ws.Log) >= ws.Occurrence + return nil +} + +// checkSubmatch checks if the log entry is present in the logs using a regexp sub match callback. +func (ws *LogStrategy) checkSubmatch(b []byte) error { + return ws.submatchCallback(ws.Log, ws.re.FindAllSubmatch(b, -1)) } diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/tls.go b/vendor/github.com/testcontainers/testcontainers-go/wait/tls.go new file mode 100644 index 00000000..ab904b27 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/tls.go @@ -0,0 +1,167 @@ +package wait + +import ( + "context" + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "time" +) + +// Validate we implement interface. +var _ Strategy = (*TLSStrategy)(nil) + +// TLSStrategy is a strategy for handling TLS. +type TLSStrategy struct { + // General Settings. + timeout *time.Duration + pollInterval time.Duration + + // Custom Settings. + certFiles *x509KeyPair + rootFiles []string + + // State. + tlsConfig *tls.Config +} + +// x509KeyPair is a pair of certificate and key files. +type x509KeyPair struct { + certPEMFile string + keyPEMFile string +} + +// ForTLSCert returns a CertStrategy that will add a Certificate to the [tls.Config] +// constructed from PEM formatted certificate key file pair in the container. +func ForTLSCert(certPEMFile, keyPEMFile string) *TLSStrategy { + return &TLSStrategy{ + certFiles: &x509KeyPair{ + certPEMFile: certPEMFile, + keyPEMFile: keyPEMFile, + }, + tlsConfig: &tls.Config{}, + pollInterval: defaultPollInterval(), + } +} + +// ForTLSRootCAs returns a CertStrategy that sets the root CAs for the [tls.Config] +// using the given PEM formatted files from the container. +func ForTLSRootCAs(pemFiles ...string) *TLSStrategy { + return &TLSStrategy{ + rootFiles: pemFiles, + tlsConfig: &tls.Config{}, + pollInterval: defaultPollInterval(), + } +} + +// WithRootCAs sets the root CAs for the [tls.Config] using the given files from +// the container. +func (ws *TLSStrategy) WithRootCAs(files ...string) *TLSStrategy { + ws.rootFiles = files + return ws +} + +// WithCert sets the [tls.Config] Certificates using the given files from the container. 
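The new Submatch callback and PermanentError make it possible to extract values from the container logs and to abort the wait early. A sketch with an illustrative log pattern; returning an ordinary error keeps polling until the timeout, while wait.NewPermanentError stops immediately.

package example_test

import (
    "errors"
    "fmt"

    "github.com/testcontainers/testcontainers-go/wait"
)

// listeningPortStrategy waits until the container logs report the port it
// bound to, and fails fast on a value that will never become valid.
func listeningPortStrategy() wait.Strategy {
    // The pattern is compiled as a regexp whenever Submatch is configured.
    return wait.ForLog(`listening on port (\d+)`).
        Submatch(func(pattern string, matches [][][]byte) error {
            if len(matches) == 0 {
                return fmt.Errorf("pattern %q not matched yet", pattern) // retried
            }
            if port := string(matches[0][1]); port == "0" {
                return wait.NewPermanentError(errors.New("server bound to port 0"))
            }
            return nil
        })
}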
+func (ws *TLSStrategy) WithCert(certPEMFile, keyPEMFile string) *TLSStrategy { + ws.certFiles = &x509KeyPair{ + certPEMFile: certPEMFile, + keyPEMFile: keyPEMFile, + } + return ws +} + +// WithServerName sets the server for the [tls.Config]. +func (ws *TLSStrategy) WithServerName(serverName string) *TLSStrategy { + ws.tlsConfig.ServerName = serverName + return ws +} + +// WithStartupTimeout can be used to change the default startup timeout. +func (ws *TLSStrategy) WithStartupTimeout(startupTimeout time.Duration) *TLSStrategy { + ws.timeout = &startupTimeout + return ws +} + +// WithPollInterval can be used to override the default polling interval of 100 milliseconds. +func (ws *TLSStrategy) WithPollInterval(pollInterval time.Duration) *TLSStrategy { + ws.pollInterval = pollInterval + return ws +} + +// TLSConfig returns the TLS config once the strategy is ready. +// If the strategy is nil, it returns nil. +func (ws *TLSStrategy) TLSConfig() *tls.Config { + if ws == nil { + return nil + } + + return ws.tlsConfig +} + +// WaitUntilReady implements the [Strategy] interface. +// It waits for the CA, client cert and key files to be available in the container and +// uses them to setup the TLS config. +func (ws *TLSStrategy) WaitUntilReady(ctx context.Context, target StrategyTarget) error { + size := len(ws.rootFiles) + if ws.certFiles != nil { + size += 2 + } + strategies := make([]Strategy, 0, size) + for _, file := range ws.rootFiles { + strategies = append(strategies, + ForFile(file).WithMatcher(func(r io.Reader) error { + buf, err := io.ReadAll(r) + if err != nil { + return fmt.Errorf("read CA cert file %q: %w", file, err) + } + + if ws.tlsConfig.RootCAs == nil { + ws.tlsConfig.RootCAs = x509.NewCertPool() + } + + if !ws.tlsConfig.RootCAs.AppendCertsFromPEM(buf) { + return fmt.Errorf("invalid CA cert file %q", file) + } + + return nil + }).WithPollInterval(ws.pollInterval), + ) + } + + if ws.certFiles != nil { + var certPEMBlock []byte + strategies = append(strategies, + ForFile(ws.certFiles.certPEMFile).WithMatcher(func(r io.Reader) error { + var err error + if certPEMBlock, err = io.ReadAll(r); err != nil { + return fmt.Errorf("read certificate cert %q: %w", ws.certFiles.certPEMFile, err) + } + + return nil + }).WithPollInterval(ws.pollInterval), + ForFile(ws.certFiles.keyPEMFile).WithMatcher(func(r io.Reader) error { + keyPEMBlock, err := io.ReadAll(r) + if err != nil { + return fmt.Errorf("read certificate key %q: %w", ws.certFiles.keyPEMFile, err) + } + + cert, err := tls.X509KeyPair(certPEMBlock, keyPEMBlock) + if err != nil { + return fmt.Errorf("x509 key pair %q %q: %w", ws.certFiles.certPEMFile, ws.certFiles.keyPEMFile, err) + } + + ws.tlsConfig.Certificates = []tls.Certificate{cert} + + return nil + }).WithPollInterval(ws.pollInterval), + ) + } + + strategy := ForAll(strategies...) + if ws.timeout != nil { + strategy.WithStartupTimeout(*ws.timeout) + } + + return strategy.WaitUntilReady(ctx, target) +} diff --git a/vendor/github.com/testcontainers/testcontainers-go/wait/walk.go b/vendor/github.com/testcontainers/testcontainers-go/wait/walk.go new file mode 100644 index 00000000..4685e500 --- /dev/null +++ b/vendor/github.com/testcontainers/testcontainers-go/wait/walk.go @@ -0,0 +1,74 @@ +package wait + +import ( + "errors" +) + +var ( + // VisitStop is used as a return value from [VisitFunc] to stop the walk. + // It is not returned as an error by any function. 
+ VisitStop = errors.New("stop the walk") + + // VisitRemove is used as a return value from [VisitFunc] to have the current node removed. + // It is not returned as an error by any function. + VisitRemove = errors.New("remove this strategy") +) + +// VisitFunc is a function that visits a strategy node. +// If it returns [VisitStop], the walk stops. +// If it returns [VisitRemove], the current node is removed. +type VisitFunc func(root Strategy) error + +// Walk walks the strategies tree and calls the visit function for each node. +func Walk(root *Strategy, visit VisitFunc) error { + if root == nil { + return errors.New("root strategy is nil") + } + + if err := walk(root, visit); err != nil { + if errors.Is(err, VisitRemove) || errors.Is(err, VisitStop) { + return nil + } + return err + } + + return nil +} + +// walk walks the strategies tree and calls the visit function for each node. +// It returns an error if the visit function returns an error. +func walk(root *Strategy, visit VisitFunc) error { + if *root == nil { + // No strategy. + return nil + } + + // Allow the visit function to customize the behaviour of the walk before visiting the children. + if err := visit(*root); err != nil { + if errors.Is(err, VisitRemove) { + *root = nil + } + + return err + } + + if s, ok := (*root).(*MultiStrategy); ok { + var i int + for range s.Strategies { + if err := walk(&s.Strategies[i], visit); err != nil { + if errors.Is(err, VisitRemove) { + s.Strategies = append(s.Strategies[:i], s.Strategies[i+1:]...) + if errors.Is(err, VisitStop) { + return VisitStop + } + continue + } + + return err + } + i++ + } + } + + return nil +} diff --git a/vendor/modules.txt b/vendor/modules.txt index def1c7a7..b84e6053 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -298,7 +298,7 @@ github.com/sirupsen/logrus github.com/stretchr/testify/assert github.com/stretchr/testify/assert/yaml github.com/stretchr/testify/require -# github.com/testcontainers/testcontainers-go v0.34.0 +# github.com/testcontainers/testcontainers-go v0.35.0 ## explicit; go 1.22 github.com/testcontainers/testcontainers-go github.com/testcontainers/testcontainers-go/exec @@ -307,7 +307,7 @@ github.com/testcontainers/testcontainers-go/internal/config github.com/testcontainers/testcontainers-go/internal/core github.com/testcontainers/testcontainers-go/internal/core/network github.com/testcontainers/testcontainers-go/wait -# github.com/testcontainers/testcontainers-go/modules/postgres v0.34.0 +# github.com/testcontainers/testcontainers-go/modules/postgres v0.35.0 ## explicit; go 1.22 github.com/testcontainers/testcontainers-go/modules/postgres # github.com/tklauser/go-sysconf v0.3.14
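The new TLS wait strategy (wait/tls.go above) assembles a tls.Config from certificate material generated inside the container. A usage sketch; the image, the in-container cert paths and the exposed port are illustrative assumptions.

package example_test

import (
    "context"
    "net/http"
    "testing"

    "github.com/testcontainers/testcontainers-go"
    "github.com/testcontainers/testcontainers-go/wait"
)

func TestWaitForTLS(t *testing.T) {
    ctx := context.Background()

    // Assumed: the image writes a CA plus a client cert/key pair under /certs.
    tlsStrategy := wait.ForTLSCert("/certs/client.pem", "/certs/client-key.pem").
        WithRootCAs("/certs/ca.pem").
        WithServerName("localhost")

    ctr, err := testcontainers.GenericContainer(ctx, testcontainers.GenericContainerRequest{
        ContainerRequest: testcontainers.ContainerRequest{
            Image:        "example/tls-server:latest", // illustrative image
            ExposedPorts: []string{"8443/tcp"},
            WaitingFor:   tlsStrategy,
        },
        Started: true,
    })
    testcontainers.CleanupContainer(t, ctr)
    if err != nil {
        t.Fatal(err)
    }

    // Once the strategy is ready, its tls.Config carries the root CAs and the
    // client certificate read from the container.
    endpoint, err := ctr.PortEndpoint(ctx, "8443/tcp", "https")
    if err != nil {
        t.Fatal(err)
    }

    client := &http.Client{Transport: &http.Transport{TLSClientConfig: tlsStrategy.TLSConfig()}}
    resp, err := client.Get(endpoint)
    if err != nil {
        t.Fatal(err)
    }
    defer resp.Body.Close()
}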