From 4d6db01107d650e7cb3484e0b6ee0111aae7ea21 Mon Sep 17 00:00:00 2001
From: Alexei Ledenev
Date: Wed, 21 May 2025 21:36:26 +0300
Subject: [PATCH] fix containerd support and docs

---
 README.md                               |  29 +-
 deploy/pumba_kube.yml                   |  10 +
 docs/advanced-network-chaos.md          |  10 +
 examples/README.md                      |   6 +
 examples/containerd_delay_demo.sh       |  10 +
 examples/pumba_containerd_delay.sh      |   9 +
 pkg/container/containerd_client.go      | 169 +++++----
 pkg/container/containerd_client_test.go | 465 ++++++++++++++----------
 8 files changed, 422 insertions(+), 286 deletions(-)
 create mode 100755 examples/containerd_delay_demo.sh
 create mode 100755 examples/pumba_containerd_delay.sh

diff --git a/README.md b/README.md
index 0f549a89..e08c1ce9 100644
--- a/README.md
+++ b/README.md
@@ -89,17 +89,29 @@ Pumba supports both Docker and containerd runtimes. You can select the runtime u
   * TLS options (`--tls`, `--tlsverify`, etc.) are applicable for Docker TCP connections.
 * **containerd**: To use Pumba with containerd, specify `--runtime containerd`.
-  * `--containerd-address`: Set the path to the containerd socket (default: `/run/containerd/containerd.sock`).
-  * `--containerd-namespace`: Specify the containerd namespace to operate within (default: `k8s.io`, common in Kubernetes; other typical namespaces include `default`).
+  * `--containerd-address`: Path to the containerd socket (default: `/run/containerd/containerd.sock`).
+  * `--containerd-namespace`: Containerd namespace to operate within (default: `k8s.io`).
 
-**Example with containerd:**
-This example targets a container in the `k8s.io` namespace using a k3s containerd socket.
+**Quick start with containerd (Linux):**
 
 ```bash
-# Ensure Pumba binary has access to the containerd socket
-./pumba --runtime containerd --containerd-address /run/k3s/containerd/containerd.sock --containerd-namespace k8s.io \
-  netem --duration 1m my-target-container-name delay --time 500
+# Pull the image and start a container named "ping" using containerd's ctr tool
+ctr -n demo image pull docker.io/library/alpine:latest
+ctr -n demo run -d docker.io/library/alpine:latest ping ping 1.1.1.1
+
+# Run Pumba against it
+pumba --runtime containerd \
+  --containerd-address /run/containerd/containerd.sock \
+  --containerd-namespace demo \
+  netem --duration 30s delay --time 300 ping
 ```
 
+On macOS, containerd typically runs inside Docker Desktop. Expose the socket or
+run Pumba inside the Docker Desktop VM and use the same command, adjusting the
+`--containerd-address` to the VM's socket path.
+
+See [examples/pumba_containerd_delay.sh](examples/pumba_containerd_delay.sh) for a
+scripted demo.
+
 **Note on `stress` command with containerd**: The `stress` command relies on cgroup access. When targeting containerd containers, Pumba attempts to place the `stress-ng` helper container into the target container's cgroup. This requires Pumba to have sufficient privileges to interact with containerd and for the `stress-ng` helper image to be compatible. The default `stress-image` (`alexeiled/stress-ng:latest-ubuntu`) should work if Pumba has appropriate host access or equivalent privileges.
 
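+For illustration only, a stress run against the quick-start container above could
+look like the following sketch (the stressor values are arbitrary; the default
+`stress-image` and the default containerd socket path are assumed):
+
+```bash
+pumba --runtime containerd \
+  --containerd-address /run/containerd/containerd.sock \
+  --containerd-namespace demo \
+  stress --duration 30s --stressors "--cpu 1 --timeout 25s" ping
+```
+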
### Kill Container command @@ -645,7 +656,7 @@ DESCRIPTION: OPTIONS: --duration value, -d value stress duration: must be shorter than recurrent interval; use with optional unit suffix: 'ms/s/m/h' --stress-image value Docker image with stress-ng tool, cgroup-bin and docker packages, and dockhack script (default: "alexeiled/stress-ng:latest-ubuntu") - --pull-image pull stress-image form Docker registry + --pull-image pull stress-image from Docker registry --stressors value stress-ng stressors; see https://kernel.ubuntu.com/~cking/stress-ng/ (default: "--cpu 4 --timeout 60s") ``` @@ -1061,7 +1072,7 @@ DESCRIPTION: OPTIONS: --duration value, -d value stress duration: must be shorter than recurrent interval; use with optional unit suffix: 'ms/s/m/h' --stress-image value Docker image with stress-ng tool, cgroup-bin and docker packages, and dockhack script (default: "alexeiled/stress-ng:latest-ubuntu") - --pull-image pull stress-image form Docker registry + --pull-image pull stress-image from Docker registry --stressors value stress-ng stressors; see https://kernel.ubuntu.com/~cking/stress-ng/ (default: "--cpu 4 --timeout 60s") ``` diff --git a/deploy/pumba_kube.yml b/deploy/pumba_kube.yml index e84e982b..fdccd0e6 100644 --- a/deploy/pumba_kube.yml +++ b/deploy/pumba_kube.yml @@ -52,6 +52,9 @@ spec: volumeMounts: - name: dockersocket mountPath: /var/run/docker.sock + # Uncomment for containerd runtime + # - name: containerdsocket + # mountPath: /run/containerd/containerd.sock # randomly add a 3000ms ± 30ms delay to 'test-2' Pod containers every 5m for 2m, where variation in delay is described by `normal` distribution, - image: gaiaadm/pumba imagePullPolicy: Always @@ -86,6 +89,9 @@ spec: volumeMounts: - name: dockersocket mountPath: /var/run/docker.sock + # Uncomment for containerd runtime + # - name: containerdsocket + # mountPath: /run/containerd/containerd.sock # limit to specific k8s nodes # EKS node group # nodeSelector: @@ -97,3 +103,7 @@ spec: - hostPath: path: /var/run/docker.sock name: dockersocket + # Uncomment below to use containerd runtime instead of Docker + # - hostPath: + # path: /run/containerd/containerd.sock + # name: containerdsocket diff --git a/docs/advanced-network-chaos.md b/docs/advanced-network-chaos.md index 292c2321..a1ffc588 100644 --- a/docs/advanced-network-chaos.md +++ b/docs/advanced-network-chaos.md @@ -9,6 +9,16 @@ Pumba now offers support for both outgoing traffic manipulation (using `tc` with **Runtime Note**: Pumba supports both Docker and containerd runtimes. The examples in this guide are applicable to both. Remember to use the global `--runtime` flag (e.g., `--runtime containerd`) and provide necessary runtime-specific options (like `--containerd-address` and `--containerd-namespace` for containerd) when running these commands. If `--runtime` is not specified, Pumba defaults to the Docker runtime. 
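+Example using the default Docker runtime (a sketch; it assumes a local Docker
+daemon and a container named `web`, and uses a `tc` helper image so `tc` does not
+need to be installed inside the target container):
+```bash
+docker run -d --name web nginx:alpine
+pumba netem --duration 30s --tc-image gaiadocker/iproute2 delay --time 200 web
+```
+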
+Example using containerd:
+```bash
+# Add 200ms delay to a container started with containerd (pull the nginx image first)
+ctr -n demo run -d docker.io/library/nginx:alpine web
+pumba --runtime containerd \
+  --containerd-address /run/containerd/containerd.sock \
+  --containerd-namespace demo \
+  netem --duration 30s delay --time 200 web
+```
+
 ![Pumba Network Chaos Testing](img/nettools-diagram.svg)
 
 The diagram above illustrates how Pumba uses a single nettools container to manipulate both incoming traffic (via iptables) and outgoing
diff --git a/examples/README.md b/examples/README.md
index fa023b33..10ae95d8 100644
--- a/examples/README.md
+++ b/examples/README.md
@@ -18,6 +18,12 @@
 1. Run "ping" container pinging `1.1.1.1`: `./delay_demo.sh`
 1. Run `pumba` adding `3000ms ± 20` delay to the "ping" container: `./pumba_delay.sh`
 
+## Delay containerd container
+
+1. Split screen horizontally
+1. Run "ping" container using containerd: `./containerd_delay_demo.sh`
+1. Run `pumba` with the containerd runtime to add a `300ms` delay: `./pumba_containerd_delay.sh`
+
 ## Add packet loss to egress traffic
 
 1. Split screen horizontally
diff --git a/examples/containerd_delay_demo.sh b/examples/containerd_delay_demo.sh
new file mode 100755
index 00000000..80869cd6
--- /dev/null
+++ b/examples/containerd_delay_demo.sh
@@ -0,0 +1,10 @@
+#!/bin/sh
+
+set -o xtrace
+
+# create namespace if not exists
+ctr ns create demo 2>/dev/null || true
+
+# pull and run a simple ping container using containerd (container ID "ping" is a positional argument)
+ctr -n demo image pull docker.io/library/alpine:latest
+ctr -n demo run -d docker.io/library/alpine:latest ping ping 1.1.1.1
diff --git a/examples/pumba_containerd_delay.sh b/examples/pumba_containerd_delay.sh
new file mode 100755
index 00000000..ae042596
--- /dev/null
+++ b/examples/pumba_containerd_delay.sh
@@ -0,0 +1,9 @@
+#!/bin/sh
+
+set -o xtrace
+
+pumba --runtime containerd \
+    --containerd-address /run/containerd/containerd.sock \
+    --containerd-namespace demo \
+    --log-level=info --interval=20s \
+    netem --duration=10s delay --time=300 ping
diff --git a/pkg/container/containerd_client.go b/pkg/container/containerd_client.go
index 7373adb2..6d039744 100644
--- a/pkg/container/containerd_client.go
+++ b/pkg/container/containerd_client.go
@@ -1,10 +1,10 @@
 package container
 
 import (
-	"context"
 	"context"
 	"fmt"
 	"os"
+	"strconv"
 	"strings"
 	"syscall"
 	"time"
@@ -40,6 +40,10 @@ type Client interface {
 	// StressContainer(ctx context.Context, c *Container, stressors []string, image string, pull bool, duration time.Duration, dryrun bool) (string, <-chan string, <-chan error, error)
 }
 
+// containerdNew is used to create containerd.Client instances. It is a variable
+// so tests can replace it with a mock implementation.
+var containerdNew = containerd.New
+
 // containerdClient implements the Client interface using containerd.
 type containerdClient struct {
 	client *containerd.Client
@@ -60,7 +64,7 @@ func NewContainerdClient(address, namespace string) (Client, error) {
 	// client, err := containerd.New(address, containerd.WithDefaultPlatform())
 	// For Pumba, it's often run in a Linux environment matching the target, so default might be fine.
 	// Let's stick to the simpler New(address) for now unless platform issues arise.
- client, err := containerd.New(address) + client, err := containerdNew(address) if err != nil { return nil, errors.Wrapf(err, "failed to connect to containerd at %s", address) } @@ -118,13 +122,12 @@ func (c *containerdClient) ListContainers(ctx context.Context, fn FilterFunc, op } // Continue with partial info if spec is unavailable but info is present } - - imageName := info.Image // Default to info.Image + + imageName := info.Image // Default to info.Image if spec != nil && spec.Image != "" { // Prefer spec.Image if available and not empty - imageName = spec.Image + imageName = spec.Image } - // Determine status var statusStr string task, errTask := cont.Task(ctx, nil) // cio.Load is not needed for just status @@ -146,7 +149,7 @@ func (c *containerdClient) ListContainers(ctx context.Context, fn FilterFunc, op statusStr = string(st.Status) } } - + // Filter by opts.All: if false, only include running containers // containerd.Running is the correct status for this check. if !opts.All && statusStr != string(containerd.Running) { @@ -154,7 +157,7 @@ func (c *containerdClient) ListContainers(ctx context.Context, fn FilterFunc, op } // Determine container name (prefer specific labels, fallback to ID) - containerName := cont.ID() // Default to ID + containerName := cont.ID() // Default to ID if name, ok := info.Labels[oci.AnnotationName]; ok && name != "" { // Standard OCI annotation for name containerName = name } else if name, ok := info.Labels["io.kubernetes.cri.container-name"]; ok && name != "" { // K8s CRI specific @@ -163,7 +166,6 @@ func (c *containerdClient) ListContainers(ctx context.Context, fn FilterFunc, op containerName = name } - pumbaC := &Container{ Cid: cont.ID(), Cname: containerName, @@ -174,13 +176,12 @@ func (c *containerdClient) ListContainers(ctx context.Context, fn FilterFunc, op // Populate CstopSignal from labels, using the Labels() method which correctly accesses Clabels. pumbaC.CstopSignal = pumbaC.Labels()[signalLabel] - if fn(pumbaC) { pumbaContainers = append(pumbaContainers, pumbaC) log.WithFields(log.Fields{ - "id": pumbaC.ID(), - "name": pumbaC.Name(), - "image": pumbaC.ImageName(), + "id": pumbaC.ID(), + "name": pumbaC.Name(), + "image": pumbaC.ImageName(), // "labels": pumbaC.Labels(), // Can be verbose "status": pumbaC.Status(), }).Debug("found matching containerd container") @@ -261,14 +262,13 @@ func parseSignal(signalStr string) (syscall.Signal, error) { return syscall.SIGXFSZ, nil default: // Try to parse as an integer if not a known string - if val, err := syscall.Atoi(signalStr); err == nil { + if val, err := strconv.Atoi(signalStr); err == nil { return syscall.Signal(val), nil } return 0, fmt.Errorf("unknown signal: %s", signalStr) } } - // StopContainer stops a containerd container. // It first sends SIGTERM, waits for the timeout, then sends SIGKILL if the container hasn't stopped. func (c *containerdClient) StopContainer(ctx context.Context, pumbaContainer *Container, timeout int, dryrun bool) error { @@ -313,7 +313,7 @@ func (c *containerdClient) StopContainer(ctx context.Context, pumbaContainer *Co log.WithError(err).WithFields(logFields).Warnf("invalid stop signal %s, defaulting to SIGTERM", stopSignalStr) sigterm = syscall.SIGTERM } - + log.WithFields(logFields).Debugf("sending signal %s to task", sigterm) if err := task.Kill(ctx, sigterm); err != nil { // If task is already stopped or stopping, this might error. 
@@ -323,7 +323,7 @@ func (c *containerdClient) StopContainer(ctx context.Context, pumbaContainer *Co } log.WithFields(logFields).Debugf("task already finished or not found when sending %s", sigterm) } - + // Wait for the task to stop or timeout statusC, err := task.Wait(ctx) if err != nil { @@ -336,9 +336,9 @@ func (c *containerdClient) StopContainer(ctx context.Context, pumbaContainer *Co if _, err := task.Delete(ctx); err != nil { // Log error but don't fail the stop operation if delete fails here, // as the primary goal (stopping) was achieved. - if !errdefs.IsNotFound(err) && !strings.Contains(err.Error(), "process already finished"){ - log.WithError(err).WithFields(logFields).Warn("failed to delete task after stop") - } + if !errdefs.IsNotFound(err) && !strings.Contains(err.Error(), "process already finished") { + log.WithError(err).WithFields(logFields).Warn("failed to delete task after stop") + } } return nil case <-time.After(time.Duration(timeout) * time.Second): @@ -360,9 +360,9 @@ func (c *containerdClient) StopContainer(ctx context.Context, pumbaContainer *Co // The task might be in a zombie state or unkillable. } if _, err := task.Delete(ctx); err != nil { - if !errdefs.IsNotFound(err) && !strings.Contains(err.Error(), "process already finished"){ - log.WithError(err).WithFields(logFields).Warn("failed to delete task after SIGKILL") - } + if !errdefs.IsNotFound(err) && !strings.Contains(err.Error(), "process already finished") { + log.WithError(err).WithFields(logFields).Warn("failed to delete task after SIGKILL") + } } return nil // Or an error if SIGKILL failed to make it stop? Docker client seems to proceed. case <-ctx.Done(): @@ -418,7 +418,7 @@ func (c *containerdClient) KillContainer(ctx context.Context, pumbaContainer *Co } log.WithFields(logFields).Debug("task already finished or not found when sending signal") } - + // For SIGKILL, containerd might not immediately reflect the status change via Wait. // However, Pumba's KillContainer (Docker) doesn't explicitly wait after sending signal. // If signal is SIGKILL, the task might be cleaned up quickly. @@ -483,12 +483,12 @@ func (c *containerdClient) RemoveContainer(ctx context.Context, pumbaContainer * return ctx.Err() } } else if !errdefs.IsNotFound(waitErr) { // Don't log if task already gone - log.WithError(waitErr).WithFields(logFields).Warn("failed to wait for task during force remove") - } + log.WithError(waitErr).WithFields(logFields).Warn("failed to wait for task during force remove") + } // Delete the task if _, err := task.Delete(ctx); err != nil { - if !errdefs.IsNotFound(err) && !strings.Contains(err.Error(), "process already finished"){ // It's okay if the task is already gone + if !errdefs.IsNotFound(err) && !strings.Contains(err.Error(), "process already finished") { // It's okay if the task is already gone log.WithError(err).WithFields(logFields).Warn("failed to delete task during force remove, proceeding with container delete") } } @@ -525,7 +525,6 @@ func (c *containerdClient) RemoveContainer(ctx context.Context, pumbaContainer * return nil } - // Ensure containerdClient implements Client at compile time. 
var _ Client = (*containerdClient)(nil) @@ -612,7 +611,7 @@ func (c *containerdClient) UnpauseContainer(ctx context.Context, pumbaContainer } return errors.Wrapf(err, "failed to get task for container %s", cont.ID()) } - + status, err := task.Status(ctx) if err != nil { return errors.Wrapf(err, "failed to get task status for container %s before unpausing", cont.ID()) @@ -667,7 +666,7 @@ func (c *containerdClient) ExecContainer(ctx context.Context, pumbaContainer *Co } return errors.Wrapf(err, "failed to get task for container %s for exec", pumbaContainer.ID()) } - + // Ensure task is running before trying to exec taskStatus, err := task.Status(ctx) if err != nil { @@ -689,7 +688,6 @@ func (c *containerdClient) ExecContainer(ctx context.Context, pumbaContainer *Co ociProcessSpec = &oci.Process{Cwd: "/", User: oci.User{UID: 0, GID: 0}} // Basic root default } - execProcessSpec := &containerd.ProcessSpec{ Args: append([]string{command}, args...), Cwd: ociProcessSpec.Cwd, // Use Cwd from container spec @@ -702,7 +700,6 @@ func (c *containerdClient) ExecContainer(ctx context.Context, pumbaContainer *Co execProcessSpec.User = &oci.User{UID: 0, GID: 0} } - execID := uuid.New().String() // Setup stdio for capturing output @@ -719,7 +716,7 @@ func (c *containerdClient) ExecContainer(ctx context.Context, pumbaContainer *Co return errors.Wrap(err, "failed to create stderr pipe for exec") } defer stderrReadPipe.Close() // Close reader in this function - + // Goroutine to capture stdout var execStdout, execStderr strings.Builder stdoutCaptureDone := make(chan struct{}) @@ -740,7 +737,7 @@ func (c *containerdClient) ExecContainer(ctx context.Context, pumbaContainer *Co log.WithError(errCopy).WithFields(logFields).Warn("error copying stderr from exec process") } }() - + // Create the cio.FIFOSet with the pipes. Stdin is nil as Pumba doesn't provide input. ioCreator := cio.NewCreator(cio.WithStreams(nil, stdoutWritePipe, stderrWritePipe)) @@ -769,9 +766,9 @@ func (c *containerdClient) ExecContainer(ctx context.Context, pumbaContainer *Co // Ensure output capture is complete // Closing the write pipes explicitly here ensures the io.Copy goroutines will finish. - // If they were not closed by the time the process exited, this is a final signal. - stdoutWritePipe.Close() - stderrWritePipe.Close() + // If they were not closed by the time the process exited, this is a final signal. + stdoutWritePipe.Close() + stderrWritePipe.Close() <-stdoutCaptureDone <-stderrCaptureDone @@ -782,7 +779,7 @@ func (c *containerdClient) ExecContainer(ctx context.Context, pumbaContainer *Co log.WithError(err).WithFields(logFields).Warnf("failed to delete exec process %s", execID) } } - + log.WithFields(logFields).WithField("stdout", execStdout.String()).WithField("stderr", execStderr.String()).Debug("exec command output") if status.ExitStatus() != 0 { @@ -879,7 +876,7 @@ func (c *containerdClient) StartContainer(ctx context.Context, pumbaContainer *C // If specific IO handling is needed later, this can be changed. // Example: ioCreator := cio.NewCreator(cio.WithStdio) or custom FIFOs. ioCreator := cio.NullIO // Creates /dev/null FIFOs if not already present. 
- + newTask, err := cont.NewTask(ctx, ioCreator) if err != nil { return errors.Wrapf(err, "failed to create new task for container %s", cont.ID()) @@ -933,7 +930,7 @@ func (c *containerdClient) RestartContainer(ctx context.Context, pumbaContainer } else { log.WithFields(logFields).Info("container stopped successfully as part of restart") } - + // Start the container // The existing StartContainer handles various states (no task, existing task stopped/paused/running) // and includes its own logging. @@ -992,7 +989,7 @@ func (c *containerdClient) StopContainerWithID(ctx context.Context, containerID } return c.StopContainer(ctx, minimalPumbaContainer, int(timeout.Seconds()), false) } - + containerName := containerID // Default to ID if name, ok := info.Labels[oci.AnnotationName]; ok && name != "" { containerName = name @@ -1002,7 +999,6 @@ func (c *containerdClient) StopContainerWithID(ctx context.Context, containerID containerName = name } - // Construct the Pumba Container object needed by StopContainer pumbaC := &Container{ Cid: containerID, @@ -1013,7 +1009,6 @@ func (c *containerdClient) StopContainerWithID(ctx context.Context, containerID } pumbaC.CstopSignal = pumbaC.Labels()[signalLabel] - log.WithFields(logFields).Infof("calling StopContainer for container %s (resolved name: %s)", containerID, pumbaC.Name()) return c.StopContainer(ctx, pumbaC, int(timeout.Seconds()), false) } @@ -1061,11 +1056,10 @@ func (c *containerdClient) runNetworkCmdHelperContainer( return errors.Wrapf(err, "failed to get status for target container task %s", targetPumbaContainer.ID()) } if targetTaskStatus.Status != containerd.Running && targetTaskStatus.Status != containerd.Paused { - // Paused is okay as network namespace exists, though commands effect might be delayed until unpause. + // Paused is okay as network namespace exists, though commands effect might be delayed until unpause. return fmt.Errorf("target container %s task is not running or paused (status: %s)", targetPumbaContainer.ID(), targetTaskStatus.Status) } - // 2. Pull helper image if requested if pullImage { log.WithFields(helperLogFields).Infof("pulling helper image %s", helperImageName) @@ -1084,13 +1078,12 @@ func (c *containerdClient) runNetworkCmdHelperContainer( return errors.Wrapf(err, "helper image %s not found after pull attempt or pull=false", helperImageName) } - // 3. Create Helper Container Spec & Container helperContainerID := "pumba-nethelper-" + targetPumbaContainer.ID() + "-" + uuid.New().String()[:8] helperSnapshotID := "pumba-nethelper-snapshot-" + helperContainerID log.WithFields(helperLogFields).WithField("helper_id", helperContainerID).Info("creating helper container spec") - + // Using WithNewSpec and oci.WithImageConfig to get defaults from the image. // The helper container will just sleep; commands are run via exec. helperSpecOpts := []oci.SpecOpts{ @@ -1116,7 +1109,7 @@ func (c *containerdClient) runNetworkCmdHelperContainer( if err != nil { return errors.Wrapf(err, "failed to create helper container %s", helperContainerID) } - + // Defer cleanup of the helper container and its snapshot defer func() { log.WithFields(helperLogFields).WithField("helper_id", helperContainerID).Info("cleaning up helper container") @@ -1125,7 +1118,6 @@ func (c *containerdClient) runNetworkCmdHelperContainer( } }() - // 4. 
Create and Start Helper Task (joining target's network namespace) log.WithFields(helperLogFields).WithField("helper_id", helperContainerID).Info("creating and starting helper task") // Use cio.NullIO as we are not interacting with the main 'sleep' task's stdio @@ -1149,7 +1141,6 @@ func (c *containerdClient) runNetworkCmdHelperContainer( } log.WithFields(helperLogFields).WithField("helper_id", helperContainerID).Info("helper task started") - // 5. Execute commands in the helper container's task for i, cmdAndArgs := range commandsToRun { if len(cmdAndArgs) == 0 { @@ -1160,15 +1151,15 @@ func (c *containerdClient) runNetworkCmdHelperContainer( args := cmdAndArgs[1:] execLogFields := helperLogFields execLogFields["command"] = strings.Join(cmdAndArgs, " ") - + log.WithFields(execLogFields).Infof("executing command %d in helper container", i+1) execID := "exec-" + helperContainerID + "-" + uuid.New().String()[:8] - + // Create process spec for exec execSpec := &containerd.ProcessSpec{ Args: cmdAndArgs, - Cwd: "/", // Default CWD for commands + Cwd: "/", // Default CWD for commands User: &oci.User{UID: 0, GID: 0}, // Run as root // Terminal: false, // Default } @@ -1177,12 +1168,15 @@ func (c *containerdClient) runNetworkCmdHelperContainer( var stdoutBuf, stderrBuf strings.Builder stdoutRead, stdoutWrite, _ := os.Pipe() stderrRead, stderrWrite, _ := os.Pipe() - + execIOCreator := cio.NewCreator(cio.WithStreams(nil, stdoutWrite, stderrWrite)) - + execProcess, err := helperTask.Exec(ctx, execID, execSpec, execIOCreator) if err != nil { - stdoutRead.Close(); stdoutWrite.Close(); stderrRead.Close(); stderrWrite.Close() + stdoutRead.Close() + stdoutWrite.Close() + stderrRead.Close() + stderrWrite.Close() return errors.Wrapf(err, "failed to create exec process for command '%s'", cmd) } @@ -1190,21 +1184,26 @@ func (c *containerdClient) runNetworkCmdHelperContainer( wg.Add(2) // For stdout and stderr copying go func() { - defer wg.Done(); defer stdoutRead.Close(); defer stdoutWrite.Close() + defer wg.Done() + defer stdoutRead.Close() + defer stdoutWrite.Close() io.Copy(&stdoutBuf, stdoutRead) }() go func() { - defer wg.Done(); defer stderrRead.Close(); defer stderrWrite.Close() + defer wg.Done() + defer stderrRead.Close() + defer stderrWrite.Close() io.Copy(&stderrBuf, stderrRead) }() if err := execProcess.Start(ctx); err != nil { // Ensure pipes are closed if start fails before goroutines manage them fully - stdoutWrite.Close(); stderrWrite.Close() + stdoutWrite.Close() + stderrWrite.Close() wg.Wait() // Wait for any partial copy to finish return errors.Wrapf(err, "failed to start exec process for command '%s'", cmd) } - + // Close our ends of the write pipes so io.Copy can complete stdoutWrite.Close() stderrWrite.Close() @@ -1215,7 +1214,7 @@ func (c *containerdClient) runNetworkCmdHelperContainer( log.WithError(err).WithFields(execLogFields).Errorf("failed waiting for exec process for command '%s'", cmd) // Continue to delete, then return error } - + if _, delErr := execProcess.Delete(ctx); delErr != nil && !errdefs.IsNotFound(delErr) { log.WithError(delErr).WithFields(execLogFields).Warnf("failed to delete exec process %s", execID) } @@ -1241,7 +1240,6 @@ func (c *containerdClient) runNetworkCmdHelperContainer( return nil } - // NetemContainer applies network emulation rules using tc in a helper container. 
func (c *containerdClient) NetemContainer(ctx context.Context, pumbaCont *Container, netInterface string, netemCmd []string, ips []*net.IPNet, sports, dports []string, duration time.Duration, tcimage string, pull, dryrun bool) error { @@ -1270,8 +1268,8 @@ func (c *containerdClient) NetemContainer(ctx context.Context, pumbaCont *Contai // This creates a prio qdisc and filters traffic to specific bands. tcCommands = append(tcCommands, []string{"tc", "qdisc", "add", "dev", netInterface, "root", "handle", "1:", "prio"}, - []string{"tc", "qdisc", "add", "dev", netInterface, "parent", "1:1", "handle", "10:", "sfq"}, // band 0 - []string{"tc", "qdisc", "add", "dev", netInterface, "parent", "1:2", "handle", "20:", "sfq"}, // band 1 + []string{"tc", "qdisc", "add", "dev", netInterface, "parent", "1:1", "handle", "10:", "sfq"}, // band 0 + []string{"tc", "qdisc", "add", "dev", netInterface, "parent", "1:2", "handle", "20:", "sfq"}, // band 1 append(append([]string{"tc", "qdisc", "add", "dev", netInterface, "parent", "1:3", "handle", "30:", "netem"}, netemCmd...)), // band 2 with netem ) // Add filters to route traffic to band 2 (1:3) @@ -1352,7 +1350,7 @@ func (c *containerdClient) IPTablesContainer(ctx context.Context, pumbaCont *Con var iptablesCommands [][]string baseCmd := append(cmdPrefix, "-w", "5") // Add wait option to iptables - + if len(srcIPs) == 0 && len(dstIPs) == 0 && len(sports) == 0 && len(dports) == 0 { cmd := baseCmd cmd = append(cmd, cmdSuffix...) @@ -1376,7 +1374,7 @@ func (c *containerdClient) IPTablesContainer(ctx context.Context, pumbaCont *Con // Docker client uses "--sport", but iptables might take "-p tcp --sport" or similar. // Assuming cmdPrefix might contain protocol, or it's a general match. // For simplicity, directly using --sport. Adjust if protocol needed. - cmd = append(cmd, "--sport", sport) + cmd = append(cmd, "--sport", sport) cmd = append(cmd, cmdSuffix...) 
iptablesCommands = append(iptablesCommands, append([]string{"iptables"}, cmd...)) } @@ -1526,7 +1524,7 @@ func (c *containerdClient) StressContainer( helperSpecOpts := []oci.SpecOpts{ oci.WithImageConfig(img), // Apply image's default config oci.WithHostname(helperContainerID), - oci.WithProcessArgs(stressCmd...), // Set stress-ng as the command + oci.WithProcessArgs(stressCmd...), // Set stress-ng as the command oci.WithLinuxCgroupsPath(targetCgroupPath), // CRITICAL: Place helper in target's cgroup oci.WithAddedCapabilities([]string{ // Capabilities needed by stress-ng specs.LinuxCapabilitySYSAdmin, // Often needed for various stressors @@ -1536,7 +1534,7 @@ func (c *containerdClient) StressContainer( oci.WithMounts([]specs.Mount{ // Mount cgroupfs to allow stress-ng to read it { Destination: "/sys/fs/cgroup", - Type: "cgroup", // Or "bind" if source is /sys/fs/cgroup from host + Type: "cgroup", // Or "bind" if source is /sys/fs/cgroup from host Source: "/sys/fs/cgroup", // This assumes cgroupfs is at /sys/fs/cgroup on the host Options: []string{"ro", "nosuid", "noexec", "nodev"}, }, @@ -1566,12 +1564,12 @@ func (c *containerdClient) StressContainer( log.WithError(delErr).WithFields(logFields).WithField("helper_id", helperContainerID).Warn("failed to delete stress helper container") } }() - + // Setup stdio for capturing output from stress-ng var stdoutBuffer, stderrBuffer strings.Builder stdoutRead, stdoutWrite, _ := os.Pipe() stderrRead, stderrWrite, _ := os.Pipe() - + ioCreator := cio.NewCreator(cio.WithStreams(nil, stdoutWrite, stderrWrite)) // Create and start the helper task @@ -1586,9 +1584,9 @@ func (c *containerdClient) StressContainer( go func() { defer wg.Done(); defer stdoutRead.Close(); io.Copy(&stdoutBuffer, stdoutRead) }() go func() { defer wg.Done(); defer stderrRead.Close(); io.Copy(&stderrBuffer, stderrRead) }() - if startErr := helperTask.Start(goroutineCtx); startErr != nil { - stdoutWrite.Close(); stderrWrite.Close() // Ensure copiers can exit + stdoutWrite.Close() + stderrWrite.Close() // Ensure copiers can exit wg.Wait() errChan <- errors.Wrapf(startErr, "failed to start task for stress helper %s", helperContainerID) return @@ -1608,7 +1606,7 @@ func (c *containerdClient) StressContainer( helperContainerID, stdoutBuffer.String(), stderrBuffer.String()) return } - + exitCode := status.ExitStatus() outputCombined := "Stdout:\n" + stdoutBuffer.String() + "\nStderr:\n" + stderrBuffer.String() log.WithFields(logFields).WithField("helper_id", helperContainerID).Infof("stress helper task exited with code %d. Output:\n%s", exitCode, outputCombined) @@ -1629,7 +1627,7 @@ func (c *containerdClient) StressContainer( // This requires that cmdPrefix is structured to allow easy replacement of -A/-I with -D. func (c *containerdClient) StopIPTablesContainer(ctx context.Context, pumbaCont *Container, cmdPrefix, cmdSuffix []string, srcIPs, dstIPs []*net.IPNet, sports, dports []string, image string, pull, dryrun bool) error { - + // Create a new cmdPrefix for deletion by replacing -A or -I with -D deleteCmdPrefix := make([]string, len(cmdPrefix)) copied := false @@ -1648,22 +1646,21 @@ func (c *containerdClient) StopIPTablesContainer(ctx context.Context, pumbaCont // For now, try to proceed; maybe the prefix is already for deletion or is more complex. 
} - logFields := log.Fields{ - "id": pumbaCont.ID(), - "name": pumbaCont.Name(), + "id": pumbaCont.ID(), + "name": pumbaCont.Name(), "deleteCmdPrefix": deleteCmdPrefix, - "cmdSuffix": cmdSuffix, - "srcIPs": srcIPs, - "dstIPs": dstIPs, - "sports": sports, - "dports": dports, - "iptablesImage": image, - "pull": pull, - "dryrun": dryrun, + "cmdSuffix": cmdSuffix, + "srcIPs": srcIPs, + "dstIPs": dstIPs, + "sports": sports, + "dports": dports, + "iptablesImage": image, + "pull": pull, + "dryrun": dryrun, } log.WithFields(logFields).Info("stopping iptables rules on container using containerd helper") - + // The logic for generating commands is identical to IPTablesContainer, just with deleteCmdPrefix var iptablesCommands [][]string baseCmd := append(deleteCmdPrefix, "-w", "5") // Add wait option diff --git a/pkg/container/containerd_client_test.go b/pkg/container/containerd_client_test.go index 0d552e8f..70a0644b 100644 --- a/pkg/container/containerd_client_test.go +++ b/pkg/container/containerd_client_test.go @@ -10,18 +10,18 @@ import ( "time" "github.com/containerd/containerd" + gcrpc "github.com/containerd/containerd/api/services/containers/v1" + imagesrpc "github.com/containerd/containerd/api/services/images/v1" + snapshotrpc "github.com/containerd/containerd/api/services/snapshots/v1" + tasksrpc "github.com/containerd/containerd/api/services/tasks/v1" "github.com/containerd/containerd/cio" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/content" - cerrdefs "github.com/containerd/errdefs" "github.com/containerd/containerd/images" "github.com/containerd/containerd/namespaces" "github.com/containerd/containerd/oci" "github.com/containerd/containerd/snapshots" - gcrpc "github.com/containerd/containerd/api/services/containers/v1" - imagesrpc "github.com/containerd/containerd/api/services/images/v1" - snapshotrpc "github.com/containerd/containerd/api/services/snapshots/v1" - tasksrpc "github.com/containerd/containerd/api/services/tasks/v1" + cerrdefs "github.com/containerd/errdefs" "github.com/google/uuid" specs "github.com/opencontainers/runtime-spec/specs-go" @@ -29,7 +29,6 @@ import ( log "github.com/sirupsen/logrus" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" - // Placeholder for actual generated mocks. // For now, we'll use testify's mock.Mock for interfaces like containerd.Container, etc. // and define mock service clients manually if needed for structure. 
@@ -111,8 +110,9 @@ func (m *MockContainer) Update(ctx context.Context, opts ...containerd.UpdateCon type MockTask struct { mock.Mock } -func (m *MockTask) ID() string { args := m.Called(); return args.String(0) } -func (m *MockTask) Pid() uint32 { args := m.Called(); return args.Get(0).(uint32) } + +func (m *MockTask) ID() string { args := m.Called(); return args.String(0) } +func (m *MockTask) Pid() uint32 { args := m.Called(); return args.Get(0).(uint32) } func (m *MockTask) Start(ctx context.Context) error { args := m.Called(ctx); return args.Error(0) } func (m *MockTask) Delete(ctx context.Context, opts ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) { args := m.Called(ctx, opts) @@ -132,7 +132,7 @@ func (m *MockTask) Wait(ctx context.Context) (<-chan containerd.ExitStatus, erro } return nil, args.Error(1) } -func (m *MockTask) Pause(ctx context.Context) error { args := m.Called(ctx); return args.Error(0) } +func (m *MockTask) Pause(ctx context.Context) error { args := m.Called(ctx); return args.Error(0) } func (m *MockTask) Resume(ctx context.Context) error { args := m.Called(ctx); return args.Error(0) } func (m *MockTask) Status(ctx context.Context) (containerd.Status, error) { args := m.Called(ctx) @@ -146,23 +146,27 @@ func (m *MockTask) Exec(ctx context.Context, id string, spec *oci.Process, creat return nil, args.Error(1) } func (m *MockTask) Pids(ctx context.Context) ([]containerd.ProcessInfo, error) { - args := m.Called(ctx); return args.Get(0).([]containerd.ProcessInfo), args.Error(1) + args := m.Called(ctx) + return args.Get(0).([]containerd.ProcessInfo), args.Error(1) } func (m *MockTask) CloseIO(ctx context.Context) error { args := m.Called(ctx); return args.Error(0) } func (m *MockTask) Resize(ctx context.Context, w, h uint32) error { - args := m.Called(ctx, w, h); return args.Error(0) + args := m.Called(ctx, w, h) + return args.Error(0) } func (m *MockTask) IO() cio.IO { args := m.Called(); return args.Get(0).(cio.IO) } func (m *MockTask) Metrics(ctx context.Context) (*containerd.Metrics, error) { - args := m.Called(ctx); return args.Get(0).(*containerd.Metrics), args.Error(1) + args := m.Called(ctx) + return args.Get(0).(*containerd.Metrics), args.Error(1) } // MockProcess is a mock for containerd.Process type MockProcess struct { mock.Mock } -func (m *MockProcess) ID() string { args := m.Called(); return args.String(0) } -func (m *MockProcess) Pid() uint32 { args := m.Called(); return args.Get(0).(uint32) } + +func (m *MockProcess) ID() string { args := m.Called(); return args.String(0) } +func (m *MockProcess) Pid() uint32 { args := m.Called(); return args.Get(0).(uint32) } func (m *MockProcess) Start(ctx context.Context) error { args := m.Called(ctx); return args.Error(0) } func (m *MockProcess) Delete(ctx context.Context, opts ...containerd.ProcessDeleteOpts) (*containerd.ExitStatus, error) { args := m.Called(ctx, opts) @@ -188,27 +192,33 @@ func (m *MockProcess) Status(ctx context.Context) (containerd.Status, error) { } func (m *MockProcess) CloseIO(ctx context.Context) error { args := m.Called(ctx); return args.Error(0) } func (m *MockProcess) Resize(ctx context.Context, w, h uint32) error { - args := m.Called(ctx, w, h); return args.Error(0) + args := m.Called(ctx, w, h) + return args.Error(0) } func (m *MockProcess) IO() cio.IO { args := m.Called(); return args.Get(0).(cio.IO) } - // MockImage is a mock for containerd.Image type MockImage struct { mock.Mock } + func (m *MockImage) Name() string { args := m.Called(); return args.String(0) } -func 
(m *MockImage) Target() images.ImageTarget { args := m.Called(); return args.Get(0).(images.ImageTarget) } +func (m *MockImage) Target() images.ImageTarget { + args := m.Called() + return args.Get(0).(images.ImageTarget) +} func (m *MockImage) Config(ctx context.Context) (images.ImageConfig, error) { - args := m.Called(ctx); return args.Get(0).(images.ImageConfig), args.Error(1) + args := m.Called(ctx) + return args.Get(0).(images.ImageConfig), args.Error(1) } -// ... other MockImage methods if needed +// ... other MockImage methods if needed // MockImageService is a mock for ImagesClient (github.com/containerd/containerd/api/services/images/v1.ImagesClient) type MockImageService struct { mock.Mock } + func (m *MockImageService) Get(ctx context.Context, req *imagesrpc.GetImageRequest, opts ...interface{}) (*imagesrpc.GetImageResponse, error) { args := m.Called(ctx, req) // Simplified opts for now return args.Get(0).(*imagesrpc.GetImageResponse), args.Error(1) @@ -230,52 +240,63 @@ func (m *MockImageService) Delete(ctx context.Context, req *imagesrpc.DeleteImag return args.Get(0).(*imagesrpc.DeleteImageResponse), args.Error(1) } - // MockSnapshotter is a mock for snapshots.Snapshotter type MockSnapshotter struct { mock.Mock } + func (m *MockSnapshotter) Stat(ctx context.Context, key string) (snapshots.Info, error) { - args := m.Called(ctx, key); return args.Get(0).(snapshots.Info), args.Error(1) + args := m.Called(ctx, key) + return args.Get(0).(snapshots.Info), args.Error(1) } func (m *MockSnapshotter) Update(ctx context.Context, info snapshots.Info, fieldpaths ...string) (snapshots.Info, error) { - args := m.Called(ctx, info, fieldpaths); return args.Get(0).(snapshots.Info), args.Error(1) + args := m.Called(ctx, info, fieldpaths) + return args.Get(0).(snapshots.Info), args.Error(1) } func (m *MockSnapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) { - args := m.Called(ctx, key); return args.Get(0).(snapshots.Usage), args.Error(1) + args := m.Called(ctx, key) + return args.Get(0).(snapshots.Usage), args.Error(1) } func (m *MockSnapshotter) Mounts(ctx context.Context, key string) ([]snapshots.Mount, error) { - args := m.Called(ctx, key); return args.Get(0).([]snapshots.Mount), args.Error(1) + args := m.Called(ctx, key) + return args.Get(0).([]snapshots.Mount), args.Error(1) } func (m *MockSnapshotter) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]snapshots.Mount, error) { - args := m.Called(ctx, key, parent, opts); return args.Get(0).([]snapshots.Mount), args.Error(1) + args := m.Called(ctx, key, parent, opts) + return args.Get(0).([]snapshots.Mount), args.Error(1) } func (m *MockSnapshotter) View(ctx context.Context, key, parent string, opts ...snapshots.Opt) ([]snapshots.Mount, error) { - args := m.Called(ctx, key, parent, opts); return args.Get(0).([]snapshots.Mount), args.Error(1) + args := m.Called(ctx, key, parent, opts) + return args.Get(0).([]snapshots.Mount), args.Error(1) } func (m *MockSnapshotter) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error { - args := m.Called(ctx, name, key, opts); return args.Error(0) + args := m.Called(ctx, name, key, opts) + return args.Error(0) } func (m *MockSnapshotter) Remove(ctx context.Context, key string) error { - args := m.Called(ctx, key); return args.Error(0) + args := m.Called(ctx, key) + return args.Error(0) } func (m *MockSnapshotter) Walk(ctx context.Context, fn snapshots.WalkFunc) error { - args := m.Called(ctx, fn); return args.Error(0) + args := 
m.Called(ctx, fn) + return args.Error(0) } func (m *MockSnapshotter) Close() error { - args := m.Called(); return args.Error(0) + args := m.Called() + return args.Error(0) } - // MockContentStore is a mock for content.Store type MockContentStore struct { mock.Mock } + func (m *MockContentStore) Info(ctx context.Context, dgst string) (content.Info, error) { - args := m.Called(ctx, dgst); return args.Get(0).(content.Info), args.Error(1) + args := m.Called(ctx, dgst) + return args.Get(0).(content.Info), args.Error(1) } -// ... other MockContentStore methods if needed +// ... other MockContentStore methods if needed // This is a simplified mock for the root containerd.Client // It allows us to inject mock services and override direct client methods. @@ -299,26 +320,33 @@ func newMockRootContainerdClient() *MockRootContainerdClient { } } - // Mock the service accessors func (m *MockRootContainerdClient) ContainerService() gcrpc.ContainersClient { args := m.Called() - if svc, ok := args.Get(0).(gcrpc.ContainersClient); ok { return svc } + if svc, ok := args.Get(0).(gcrpc.ContainersClient); ok { + return svc + } return m.mockContainerService } func (m *MockRootContainerdClient) TaskService() tasksrpc.TasksClient { args := m.Called() - if svc, ok := args.Get(0).(tasksrpc.TasksClient); ok { return svc } + if svc, ok := args.Get(0).(tasksrpc.TasksClient); ok { + return svc + } return m.mockTaskService } func (m *MockRootContainerdClient) ImageService() imagesrpc.ImagesClient { args := m.Called() - if svc, ok := args.Get(0).(imagesrpc.ImagesClient); ok { return svc } + if svc, ok := args.Get(0).(imagesrpc.ImagesClient); ok { + return svc + } return m.mockImageService } func (m *MockRootContainerdClient) SnapshotService(snapshotterName string) (snapshots.Snapshotter, error) { args := m.Called(snapshotterName) - if svc, ok := args.Get(0).(snapshots.Snapshotter); ok { return svc, args.Error(1) } + if svc, ok := args.Get(0).(snapshots.Snapshotter); ok { + return svc, args.Error(1) + } if m.mockSnapshotService != nil { if ss, ok := m.mockSnapshotService[snapshotterName]; ok { return ss, args.Error(1) @@ -328,9 +356,12 @@ func (m *MockRootContainerdClient) SnapshotService(snapshotterName string) (snap } func (m *MockRootContainerdClient) ContentStore() content.Store { args := m.Called() - if cs, ok := args.Get(0).(content.Store); ok { return cs } + if cs, ok := args.Get(0).(content.Store); ok { + return cs + } return m.mockContentStore } + // Mock other methods of containerd.Client if they are directly used by containerd_client.go // For example: LoadContainer, GetImage, Pull, etc. These often wrap service calls. func (m *MockRootContainerdClient) LoadContainer(ctx context.Context, id string) (containerd.Container, error) { @@ -399,7 +430,6 @@ func restoreOriginalContainerdNew() { containerdNew = originalNewContainerdClient } - func TestNewContainerdClient(t *testing.T) { // Note: Mocking the package-level containerd.New function is complex and often not feasible // without linker tricks or redesigning the NewContainerdClient to accept a factory. 
@@ -413,17 +443,17 @@ func TestNewContainerdClient(t *testing.T) { expectedError string }{ { - name: "empty address", - address: "", - namespace: "default", - expectError: true, + name: "empty address", + address: "", + namespace: "default", + expectError: true, expectedError: "containerd address cannot be empty", }, { - name: "empty namespace", - address: "/run/containerd/containerd.sock", - namespace: "", - expectError: true, + name: "empty namespace", + address: "/run/containerd/containerd.sock", + namespace: "", + expectError: true, expectedError: "containerd namespace cannot be empty", }, // Successful case is hard to test without a running containerd or complex mock of containerd.New @@ -436,7 +466,6 @@ func TestNewContainerdClient(t *testing.T) { // setupMockNew: func() { // This setup is conceptual due to containerd.New not being easily mockable // setupMockContainerdNew(func(address string, opts ...containerd.ClientOpt) (*containerd.Client, error) { // // Return a dummy, non-nil client. This client won't be fully functional - Daunting task ahead. // // but allows NewContainerdClient to pass the containerd.New call. // // A truly mocked client that can be used by other methods would be needed for deeper tests. // return &containerd.Client{}, nil // Simplified: ideally a mock client @@ -482,9 +511,8 @@ func TestNewContainerdClient(t *testing.T) { func testContext(ns string) context.Context { return namespaces.WithNamespace(context.Background(), ns) } -// containerdNew is a variable that holds the function to create a new containerd client. -// This allows us to replace it with a mock during tests. -var containerdNew = containerd.New + +// containerdNew is defined in the production code. Tests can override it to inject mocks. // mockExitStatus creates a simple containerd.ExitStatus for testing. func mockExitStatus(code uint32, ts time.Time) containerd.ExitStatus { @@ -507,7 +535,7 @@ func mockExitStatus(code uint32, ts time.Time) containerd.ExitStatus { // log.SetOutput(os.Stderr) // to restore, or a specific file. func TestMain(m *testing.M) { // Disable logging for tests to keep output clean, can be enabled for debugging - log.SetOutput(io.Discard) + log.SetOutput(io.Discard) originalContainerdNewFn := containerdNew // Save original code := m.Run() containerdNew = originalContainerdNewFn // Restore original @@ -582,17 +610,16 @@ func newTestContainerdClient(mockRoot *MockRootContainerdClient, ns string) *con // has the necessary methods mocked (LoadContainer, Containers, Pull, GetImage, NewContainer). return &containerdClient{ - client: (*containerd.Client)(nil), // This will be the actual problem. - // We need to ensure calls go to MockRootContainerdClient. - // This might require a test-specific build of containerdClient - // or making `client` an interface. - // For now, tests will instantiate client and then we'd have to - // imagine `client.client` is our mock. + client: (*containerd.Client)(nil), // This will be the actual problem. + // We need to ensure calls go to MockRootContainerdClient. + // This might require a test-specific build of containerdClient + // or making `client` an interface. + // For now, tests will instantiate client and then we'd have to + // imagine `client.client` is our mock. 
namespace: ns, } } - func TestContainerdClient_ListContainers(t *testing.T) { ctx := testContext("testns") defaultLabels := map[string]string{"key1": "value1", "pumba": "true"} @@ -621,7 +648,7 @@ func TestContainerdClient_ListContainers(t *testing.T) { mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { mockCont.id = "id1" // Set ID for the mock container mockRoot.On("Containers", ctx, mock.AnythingOfType("[]string")).Return([]containerd.Container{mockCont}, nil).Once() - + mockCont.On("ID").Return("id1") // Ensure ID() is mocked on MockContainer mockCont.On("Info", ctx, mock.Anything).Return(containers.Container{ID: "id1", Image: defaultImageName, Labels: defaultLabels}, nil).Once() mockCont.On("Spec", ctx).Return(defaultSpec, nil).Once() @@ -629,7 +656,7 @@ func TestContainerdClient_ListContainers(t *testing.T) { mockTask.On("Status", ctx).Return(containerd.Status{Status: containerd.Running}, nil).Once() }, expectedContainers: 1, - filterFunc: func(c *Container) bool { return true }, + filterFunc: func(c *Container) bool { return true }, }, { name: "one container, stopped, opts.All=false, should be filtered out by status", @@ -645,7 +672,7 @@ func TestContainerdClient_ListContainers(t *testing.T) { mockTask.On("Status", ctx).Return(containerd.Status{Status: containerd.Stopped}, nil).Once() }, expectedContainers: 0, // Filtered out because it's not running and All=false - filterFunc: func(c *Container) bool { return true }, + filterFunc: func(c *Container) bool { return true }, }, { name: "one container, running, but filtered out by FilterFunc", @@ -661,7 +688,7 @@ func TestContainerdClient_ListContainers(t *testing.T) { mockTask.On("Status", ctx).Return(containerd.Status{Status: containerd.Running}, nil).Once() }, expectedContainers: 0, - filterFunc: func(c *Container) bool { return false }, // This filter rejects the container + filterFunc: func(c *Container) bool { return false }, // This filter rejects the container }, { name: "error from client.Containers", @@ -689,7 +716,7 @@ func TestContainerdClient_ListContainers(t *testing.T) { t.Run(tc.name, func(t *testing.T) { mockRootClient := new(MockRootContainerdClient) mockContainerObj := new(MockContainer) // Single mock container obj for simplicity in setup - mockTaskObj := new(MockTask) // Single mock task obj + mockTaskObj := new(MockTask) // Single mock task obj if tc.mockSetup != nil { tc.mockSetup(mockRootClient, mockContainerObj, mockTaskObj) @@ -702,7 +729,7 @@ func TestContainerdClient_ListContainers(t *testing.T) { // This requires containerdClient.client to be exported or a test-specific constructor. // Let's proceed as if it's possible to inject. // A real solution would be to refactor containerdClient to accept an interface that *containerd.Client implements. - + // For the purpose of this test generation, we will manually construct client // and assign the mocked root client. This is NOT how it would work without // modification to containerdClient or using a more complex mocking framework @@ -725,21 +752,20 @@ func TestContainerdClient_ListContainers(t *testing.T) { // The most direct way is to make client.client an interface type that *containerd.Client implements, // and our MockRootContainerdClient also implements. // For now, the test will be structured as if client.client *is* mockRootClient. - + // The test will fail if `client.client` is nil and methods are called on it. // This setup is more of a blueprint due to the direct usage of *containerd.Client. 
// To proceed, I will assume that `client.client = mockRootClient` is somehow achieved for testing. // This test is therefore more of a design for how it *should* be testable. _ = client // Avoid unused client for now. - + // Actual call would be: // result, err := client.ListContainers(ctx, tc.filterFunc, tc.opts) // For now, skipping execution due to the mocking challenge of client.client itself t.Skipf("Skipping ListContainers test '%s' due to complexity of mocking *containerd.Client directly. Test structure is a blueprint.", tc.name) - // Assertions would be: // if tc.expectedError != "" { // assert.Error(t, err) @@ -765,7 +791,6 @@ func TestContainerdClient_ExecContainer(t *testing.T) { defaultOCIProcess := &oci.Process{Cwd: "/", User: oci.User{UID: 0, GID: 0}} defaultSpec := &oci.Spec{Version: "1.0.2", Process: defaultOCIProcess} - tests := []struct { name string dryRun bool @@ -787,11 +812,11 @@ func TestContainerdClient_ExecContainer(t *testing.T) { // The execID is generated by uuid, so use mock.AnythingOfType or a matcher mockTask.On("Exec", ctx, mock.AnythingOfType("string"), mock.AnythingOfType("*containerd.ProcessSpec"), mock.Anything).Return(mockProc, nil).Once() mockProc.On("Start", ctx).Return(nil).Once() - + exitChan := make(chan containerd.ExitStatus, 1) exitChan <- mockExitStatus(0, time.Now()) // Successful exit mockProc.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() - mockProc.On("Delete", ctx).Return(&containerd.ExitStatus{Code: 0}, nil).Once() + mockProc.On("Delete", ctx).Return(&containerd.ExitStatus{Code: 0}, nil).Once() }, }, { @@ -805,7 +830,7 @@ func TestContainerdClient_ExecContainer(t *testing.T) { mockCont.On("Spec", ctx).Return(defaultSpec, nil).Once() mockTask.On("Exec", ctx, mock.AnythingOfType("string"), mock.AnythingOfType("*containerd.ProcessSpec"), mock.Anything).Return(mockProc, nil).Once() mockProc.On("Start", ctx).Return(nil).Once() - + exitChan := make(chan containerd.ExitStatus, 1) exitChan <- mockExitStatus(1, time.Now()) // Failed exit mockProc.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() @@ -817,7 +842,8 @@ func TestContainerdClient_ExecContainer(t *testing.T) { name: "dry run", dryRun: true, pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName}, - mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask, mockProc *MockProcess) {}, + mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask, mockProc *MockProcess) { + }, }, { name: "task not running", @@ -835,7 +861,8 @@ func TestContainerdClient_ExecContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = pumbaContainerID + mockContainer := new(MockContainer) + mockContainer.id = pumbaContainerID mockTask := new(MockTask) mockProcess := new(MockProcess) tc.mockSetup(mockRootCtClient, mockContainer, mockTask, mockProcess) @@ -916,8 +943,8 @@ func TestContainerdClient_StartContainer(t *testing.T) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Once() mockCont.On("Task", ctx, mock.Anything).Return(oldTask, nil).Once() oldTask.On("Status", ctx).Return(containerd.Status{Status: containerd.Stopped}, nil).Once() - oldTask.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() - + oldTask.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() + mockCont.On("NewTask", 
ctx, mock.AnythingOfType("cio.Creator"), mock.Anything).Return(newTask, nil).Once() newTask.On("Start", ctx).Return(nil).Once() }, @@ -929,7 +956,7 @@ func TestContainerdClient_StartContainer(t *testing.T) { mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, _ *MockTask, newTask *MockTask) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Once() mockCont.On("Task", ctx, mock.Anything).Return(nil, cerrdefs.ErrNotFound).Once() // No existing task - + mockCont.On("NewTask", ctx, mock.AnythingOfType("cio.Creator"), mock.Anything).Return(newTask, nil).Once() newTask.On("Start", ctx).Return(nil).Once() }, @@ -945,9 +972,11 @@ func TestContainerdClient_StartContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = pumbaContainerID + mockContainer := new(MockContainer) + mockContainer.id = pumbaContainerID mockTask := new(MockTask) // Represents existing/old task - mockNewTask := new(MockTask); mockNewTask.id = "new-task-id" // Represents newly created task + mockNewTask := new(MockTask) + mockNewTask.id = "new-task-id" // Represents newly created task tc.mockSetup(mockRootCtClient, mockContainer, mockTask, mockNewTask) client := newTestableContainerdClient(mockRootCtClient, "testns") @@ -963,7 +992,7 @@ func TestContainerdClient_StartContainer(t *testing.T) { if !tc.dryRun && tc.expectError == "" { mockContainer.AssertExpectations(t) mockTask.AssertExpectations(t) - mockNewTask.AssertExpectations(t) + mockNewTask.AssertExpectations(t) } }) } @@ -988,9 +1017,9 @@ func TestContainerdClient_RestartContainer(t *testing.T) { pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName, Clabels: map[string]string{}}, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockStopTask *MockTask, mockStartTask *MockTask) { // --- Mocks for StopContainer part --- - mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Twice() - mockCont.On("Task", ctx, mock.Anything).Return(mockStopTask, nil).Once() - + mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Twice() + mockCont.On("Task", ctx, mock.Anything).Return(mockStopTask, nil).Once() + stopExitChan := make(chan containerd.ExitStatus, 1) stopExitChan <- mockExitStatus(0, time.Now()) mockStopTask.On("Kill", ctx, syscall.SIGTERM).Return(nil).Once() @@ -998,7 +1027,7 @@ func TestContainerdClient_RestartContainer(t *testing.T) { mockStopTask.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() // --- Mocks for StartContainer part (assuming task was successfully stopped and deleted) --- - mockCont.On("Task", ctx, mock.Anything).Return(nil, cerrdefs.ErrNotFound).Once() + mockCont.On("Task", ctx, mock.Anything).Return(nil, cerrdefs.ErrNotFound).Once() mockCont.On("NewTask", ctx, mock.AnythingOfType("cio.Creator"), mock.Anything).Return(mockStartTask, nil).Once() mockStartTask.On("Start", ctx).Return(nil).Once() }, @@ -1015,9 +1044,11 @@ func TestContainerdClient_RestartContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = pumbaContainerID + mockContainer := new(MockContainer) + mockContainer.id = pumbaContainerID mockStopTask := new(MockTask) - mockStartTask := new(MockTask); mockStartTask.id = "start-task-id" + mockStartTask := new(MockTask) + 
mockStartTask.id = "start-task-id" tc.mockSetup(mockRootCtClient, mockContainer, mockStopTask, mockStartTask) client := newTestableContainerdClient(mockRootCtClient, "testns") @@ -1042,9 +1073,9 @@ func TestContainerdClient_RestartContainer(t *testing.T) { func TestContainerdClient_StopContainerWithID(t *testing.T) { ctx := testContext("testns") targetContainerID := "stop-by-id" // Renamed to avoid conflict - containerName := "stop-by-id-name" + containerName := "stop-by-id-name" timeoutDuration := 5 * time.Second // Renamed - defaultLabels := map[string]string{oci.AnnotationName: containerName} + defaultLabels := map[string]string{oci.AnnotationName: containerName} tests := []struct { name string @@ -1056,14 +1087,14 @@ func TestContainerdClient_StopContainerWithID(t *testing.T) { name: "successful stop by ID", dryRun: false, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { - mockRoot.On("LoadContainer", ctx, targetContainerID).Return(mockCont, nil).Once() + mockRoot.On("LoadContainer", ctx, targetContainerID).Return(mockCont, nil).Once() mockCont.On("Info", ctx, mock.Anything).Return(containers.Container{ID: targetContainerID, Labels: defaultLabels}, nil).Once() - + mockCont.On("Task", ctx, mock.Anything).Return(mockTask, nil).Once() - + exitChan := make(chan containerd.ExitStatus, 1) exitChan <- mockExitStatus(0, time.Now()) - mockTask.On("Kill", ctx, syscall.SIGTERM).Return(nil).Once() + mockTask.On("Kill", ctx, syscall.SIGTERM).Return(nil).Once() mockTask.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() mockTask.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() }, @@ -1072,7 +1103,7 @@ func TestContainerdClient_StopContainerWithID(t *testing.T) { name: "dry run", dryRun: true, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { - mockRoot.On("LoadContainer", ctx, targetContainerID).Return(mockCont, nil).Maybe() + mockRoot.On("LoadContainer", ctx, targetContainerID).Return(mockCont, nil).Maybe() mockCont.On("Info", ctx, mock.Anything).Return(containers.Container{ID: targetContainerID, Labels: defaultLabels}, nil).Maybe() }, }, @@ -1089,7 +1120,8 @@ func TestContainerdClient_StopContainerWithID(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = targetContainerID + mockContainer := new(MockContainer) + mockContainer.id = targetContainerID mockTask := new(MockTask) tc.mockSetup(mockRootCtClient, mockContainer, mockTask) @@ -1144,9 +1176,9 @@ func TestContainerdClient_NetemContainer(t *testing.T) { expectedCommands: [][]string{{"tc", "qdisc", "add", "dev", netIface, "root", "netem", "delay", "100ms"}}, }, { - name: "simple delay, actual run", - netemCmd: []string{"delay", "100ms"}, - dryRun: false, + name: "simple delay, actual run", + netemCmd: []string{"delay", "100ms"}, + dryRun: false, pullImage: true, mockHelperSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask *MockTask) { // Target container setup @@ -1158,7 +1190,7 @@ func TestContainerdClient_NetemContainer(t *testing.T) { // Helper image pull and get mockRoot.On("Pull", ctx, helperImage, mock.Anything).Return(mockHelperImage, nil).Once() mockRoot.On("GetImage", ctx, helperImage).Return(mockHelperImage, nil).Once() - + // Helper container creation 
mockTargetCont.On("Spec", ctx).Return(&oci.Spec{Linux: &specs.Linux{}}, nil) // For skip label annotation mockRoot.On("NewContainer", ctx, mock.AnythingOfType("string"), mock.Anything, mock.Anything).Return(mockHelperCont, nil).Once() @@ -1175,7 +1207,8 @@ func TestContainerdClient_NetemContainer(t *testing.T) { return assert.ObjectsAreEqualValues(append([]string{"tc"}, "qdisc", "add", "dev", netIface, "root", "netem", "delay", "100ms"), spec.Args) }), mock.Anything).Return(mockExecProc, nil).Once() mockExecProc.On("Start", ctx).Return(nil).Once() - exitChan := make(chan containerd.ExitStatus, 1); exitChan <- mockExitStatus(0, time.Now()) + exitChan := make(chan containerd.ExitStatus, 1) + exitChan <- mockExitStatus(0, time.Now()) mockExecProc.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() mockExecProc.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() }, @@ -1187,9 +1220,11 @@ func TestContainerdClient_NetemContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockTargetContainer := new(MockContainer); mockTargetContainer.id = targetContainerID + mockTargetContainer := new(MockContainer) + mockTargetContainer.id = targetContainerID mockTargetTask := new(MockTask) - mockHelperContainer := new(MockContainer); mockHelperContainer.id = "helper-id" // Prevent issues if ID() is called on it + mockHelperContainer := new(MockContainer) + mockHelperContainer.id = "helper-id" // Prevent issues if ID() is called on it mockHelperTask := new(MockTask) if tc.mockHelperSetup != nil { @@ -1217,7 +1252,6 @@ func TestContainerdClient_NetemContainer(t *testing.T) { } } - func TestContainerdClient_StopNetemContainer(t *testing.T) { ctx := testContext("testns") targetContainerID := "stop-netem-target-id" @@ -1228,7 +1262,6 @@ func TestContainerdClient_StopNetemContainer(t *testing.T) { mockHelperImage := new(MockImage) mockHelperImage.On("Config", mock.Anything).Return(images.ImageConfig{}, nil) - tests := []struct { name string ips []*net.IPNet // Used to determine which tc command is generated @@ -1241,12 +1274,13 @@ func TestContainerdClient_StopNetemContainer(t *testing.T) { { name: "simple stop netem, dry run", dryRun: true, - mockHelperSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask *MockTask) {}, + mockHelperSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask *MockTask) { + }, expectedCommands: [][]string{{"tc", "qdisc", "del", "dev", netIface, "root", "netem"}}, }, { - name: "simple stop netem, actual run", - dryRun: false, + name: "simple stop netem, actual run", + dryRun: false, pullImage: false, mockHelperSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask *MockTask) { mockRoot.On("LoadContainer", ctx, targetContainerID).Return(mockTargetCont, nil).Once() @@ -1255,7 +1289,7 @@ func TestContainerdClient_StopNetemContainer(t *testing.T) { mockTargetTask.On("Pid").Return(uint32(1234)).Once() mockRoot.On("GetImage", ctx, helperImage).Return(mockHelperImage, nil).Once() // pull = false - + mockTargetCont.On("Spec", ctx).Return(&oci.Spec{Linux: &specs.Linux{}}, nil) mockRoot.On("NewContainer", ctx, mock.AnythingOfType("string"), mock.Anything, mock.Anything).Return(mockHelperCont, 
nil).Once() mockHelperCont.On("Delete", mock.Anything, mock.Anything).Return(nil).Once() @@ -1269,7 +1303,8 @@ func TestContainerdClient_StopNetemContainer(t *testing.T) { return assert.ObjectsAreEqualValues(append([]string{"tc"}, "qdisc", "del", "dev", netIface, "root", "netem"), spec.Args) }), mock.Anything).Return(mockExecProc, nil).Once() mockExecProc.On("Start", ctx).Return(nil).Once() - exitChan := make(chan containerd.ExitStatus, 1); exitChan <- mockExitStatus(0, time.Now()) + exitChan := make(chan containerd.ExitStatus, 1) + exitChan <- mockExitStatus(0, time.Now()) mockExecProc.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() mockExecProc.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() }, @@ -1277,7 +1312,7 @@ func TestContainerdClient_StopNetemContainer(t *testing.T) { }, { name: "stop netem with IP filters (deletes prio qdisc)", - ips: []*net.IPNet{{IP: []byte{192,168,0,1}, Mask: []byte{255,255,255,0}}}, + ips: []*net.IPNet{{IP: []byte{192, 168, 0, 1}, Mask: []byte{255, 255, 255, 0}}}, dryRun: false, mockHelperSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask *MockTask) { mockRoot.On("LoadContainer", ctx, targetContainerID).Return(mockTargetCont, nil).Once() @@ -1296,7 +1331,8 @@ func TestContainerdClient_StopNetemContainer(t *testing.T) { return assert.ObjectsAreEqualValues(append([]string{"tc"}, "qdisc", "del", "dev", netIface, "root", "handle", "1:", "prio"), spec.Args) }), mock.Anything).Return(mockExecProc, nil).Once() mockExecProc.On("Start", ctx).Return(nil).Once() - exitChan := make(chan containerd.ExitStatus, 1); exitChan <- mockExitStatus(0, time.Now()) + exitChan := make(chan containerd.ExitStatus, 1) + exitChan <- mockExitStatus(0, time.Now()) mockExecProc.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() mockExecProc.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() }, @@ -1307,9 +1343,11 @@ func TestContainerdClient_StopNetemContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockTargetContainer := new(MockContainer); mockTargetContainer.id = targetContainerID + mockTargetContainer := new(MockContainer) + mockTargetContainer.id = targetContainerID mockTargetTask := new(MockTask) - mockHelperContainer := new(MockContainer); mockHelperContainer.id = "helper-stop-id" + mockHelperContainer := new(MockContainer) + mockHelperContainer.id = "helper-stop-id" mockHelperTask := new(MockTask) if tc.mockHelperSetup != nil { @@ -1341,7 +1379,7 @@ func TestContainerdClient_IPTablesContainer(t *testing.T) { targetContainerID := "iptables-target-id" targetPumbaCont := &Container{Cid: targetContainerID, Cname: "iptables-target"} helperImage := "alpine/iptables" // Example image - + mockHelperImg := new(MockImage) // Renamed to avoid conflict mockHelperImg.On("Config", mock.Anything).Return(images.ImageConfig{}, nil) @@ -1361,7 +1399,8 @@ func TestContainerdClient_IPTablesContainer(t *testing.T) { cmdPrefix: []string{"-A", "INPUT"}, cmdSuffix: []string{"-j", "DROP"}, dryRun: true, - mockHelperSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask *MockTask) {}, + mockHelperSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask 
*MockTask) { + }, expectedCommands: [][]string{{"iptables", "-A", "INPUT", "-w", "5", "-j", "DROP"}}, }, { @@ -1388,7 +1427,8 @@ func TestContainerdClient_IPTablesContainer(t *testing.T) { return assert.ObjectsAreEqualValues(append([]string{"iptables"}, "-A", "INPUT", "-w", "5", "-j", "DROP"), spec.Args) }), mock.Anything).Return(mockExecProc, nil).Once() mockExecProc.On("Start", ctx).Return(nil).Once() - exitChan := make(chan containerd.ExitStatus, 1); exitChan <- mockExitStatus(0, time.Now()) + exitChan := make(chan containerd.ExitStatus, 1) + exitChan <- mockExitStatus(0, time.Now()) mockExecProc.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() mockExecProc.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() }, @@ -1399,9 +1439,11 @@ func TestContainerdClient_IPTablesContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockTargetContainer := new(MockContainer); mockTargetContainer.id = targetContainerID + mockTargetContainer := new(MockContainer) + mockTargetContainer.id = targetContainerID mockTargetTask := new(MockTask) - mockHelperContainer := new(MockContainer); mockHelperContainer.id = "helper-iptables-id" + mockHelperContainer := new(MockContainer) + mockHelperContainer.id = "helper-iptables-id" mockHelperTask := new(MockTask) if tc.mockHelperSetup != nil { @@ -1434,7 +1476,8 @@ func TestContainerdClient_StopIPTablesContainer(t *testing.T) { targetPumbaCont := &Container{Cid: targetContainerID, Cname: "stop-iptables-target"} helperImage := "alpine/iptables" - mockHelperImg := new(MockImage); mockHelperImg.On("Config", mock.Anything).Return(images.ImageConfig{}, nil) + mockHelperImg := new(MockImage) + mockHelperImg.On("Config", mock.Anything).Return(images.ImageConfig{}, nil) tests := []struct { name string @@ -1450,7 +1493,8 @@ func TestContainerdClient_StopIPTablesContainer(t *testing.T) { cmdPrefix: []string{"-A", "INPUT"}, cmdSuffix: []string{"-j", "DROP"}, dryRun: true, - mockHelperSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask *MockTask) {}, + mockHelperSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask *MockTask) { + }, expectedCommands: [][]string{{"iptables", "-D", "INPUT", "-w", "5", "-j", "DROP"}}, }, { @@ -1475,7 +1519,8 @@ func TestContainerdClient_StopIPTablesContainer(t *testing.T) { return assert.ObjectsAreEqualValues(append([]string{"iptables"}, "-D", "OUTPUT", "1", "-w", "5", "-p", "tcp", "--dport", "80", "-j", "REJECT"), spec.Args) }), mock.Anything).Return(mockExecProc, nil).Once() mockExecProc.On("Start", ctx).Return(nil).Once() - exitChan := make(chan containerd.ExitStatus, 1); exitChan <- mockExitStatus(0, time.Now()) + exitChan := make(chan containerd.ExitStatus, 1) + exitChan <- mockExitStatus(0, time.Now()) mockExecProc.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() mockExecProc.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() }, @@ -1486,9 +1531,11 @@ func TestContainerdClient_StopIPTablesContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockTargetContainer := new(MockContainer); mockTargetContainer.id = targetContainerID + mockTargetContainer := new(MockContainer) + mockTargetContainer.id = 
targetContainerID mockTargetTask := new(MockTask) - mockHelperContainer := new(MockContainer); mockHelperContainer.id = "helper-stop-iptables-id" + mockHelperContainer := new(MockContainer) + mockHelperContainer.id = "helper-stop-iptables-id" mockHelperTask := new(MockTask) if tc.mockHelperSetup != nil { @@ -1528,7 +1575,6 @@ func TestContainerdClient_StressContainer(t *testing.T) { mockStressHelperImage.On("Name").Return(helperImageName) // Needed for OCI spec opts mockStressHelperImage.On("Config", mock.Anything).Return(images.ImageConfig{}, nil) - tests := []struct { name string dryRun bool @@ -1562,7 +1608,7 @@ func TestContainerdClient_StressContainer(t *testing.T) { exitStatusChan := make(chan containerd.ExitStatus, 1) exitStatusChan <- mockExitStatus(0, time.Now()) // stress-ng success mockHelperTask.On("Wait", testContext(testNs)).Return((<-chan containerd.ExitStatus)(exitStatusChan), nil).Once() - + // Defer functions mockHelperCont.On("Delete", mock.Anything, mock.AnythingOfType("containerd.DeleteOpts")).Return(nil).Once() // WithTaskDeleteOnExit is used, so no explicit mockHelperTask.Delete() expected here by main path @@ -1576,8 +1622,8 @@ func TestContainerdClient_StressContainer(t *testing.T) { }, }, { - name: "target container not running", - dryRun: false, + name: "target container not running", + dryRun: false, mockSetup: func(mockRoot *MockRootContainerdClient, mockTargetCont *MockContainer, mockTargetTask *MockTask, mockHelperCont *MockContainer, mockHelperTask *MockTask) { mockRoot.On("LoadContainer", testContext(testNs), targetContainerID).Return(mockTargetCont, nil).Once() mockTargetCont.On("Task", testContext(testNs), mock.Anything).Return(nil, cerrdefs.ErrNotFound).Once() @@ -1605,17 +1651,19 @@ func TestContainerdClient_StressContainer(t *testing.T) { mockHelperTask.On("Wait", testContext(testNs)).Return((<-chan containerd.ExitStatus)(exitStatusChan), nil).Once() mockHelperCont.On("Delete", mock.Anything, mock.AnythingOfType("containerd.DeleteOpts")).Return(nil).Once() }, - expectErrorChan: true, // Error will come from the goroutine via errChan - finalErrorMsg: "stress-ng helper", // Part of the error message + expectErrorChan: true, // Error will come from the goroutine via errChan + finalErrorMsg: "stress-ng helper", // Part of the error message }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockTargetContainer := new(MockContainer); mockTargetContainer.id = targetContainerID + mockTargetContainer := new(MockContainer) + mockTargetContainer.id = targetContainerID mockTargetTask := new(MockTask) - mockHelperContainer := new(MockContainer); mockHelperContainer.id = "stress-helper-id" + mockHelperContainer := new(MockContainer) + mockHelperContainer.id = "stress-helper-id" mockHelperTask := new(MockTask) if tc.mockSetup != nil { @@ -1623,7 +1671,7 @@ func TestContainerdClient_StressContainer(t *testing.T) { } client := newTestableContainerdClient(mockRootCtClient, testNs) // Use testNs here - + // Use a new context for the StressContainer call itself, as originalCtx is passed to the goroutine callCtx, cancel := context.WithTimeout(context.Background(), 15*time.Second) // Test timeout defer cancel() @@ -1668,7 +1716,7 @@ func TestContainerdClient_StressContainer(t *testing.T) { } } } - + // Assert mock calls after handling channels, especially for non-dry-run, no-sync-error cases mockRootCtClient.AssertExpectations(t) if !tc.dryRun && tc.expectedError == "" { @@ -1683,6 +1731,7 @@ func 
TestContainerdClient_StressContainer(t *testing.T) { }) } } + // Further tests would follow a similar pattern, adapting the mocks for each method. // For helper container methods, the mocking would be more involved: // - Mock Pull, GetImage, NewContainer, container.Spec, container.NewTask, task.Start, task.Exec, process.Start, process.Wait, process.Delete, task.Delete, container.Delete. @@ -1709,16 +1758,16 @@ func TestContainerdClient_StopContainer(t *testing.T) { defaultTimeout := 5 tests := []struct { - name string - dryRun bool - customSignal string // Label for custom signal - mockSetup func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) - expectError string - pumbaCont *Container + name string + dryRun bool + customSignal string // Label for custom signal + mockSetup func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) + expectError string + pumbaCont *Container }{ { - name: "successful stop with SIGTERM", - dryRun: false, + name: "successful stop with SIGTERM", + dryRun: false, pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName, Clabels: map[string]string{}}, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Once() @@ -1730,23 +1779,23 @@ func TestContainerdClient_StopContainer(t *testing.T) { exitChan := make(chan containerd.ExitStatus, 1) exitChan <- mockExitStatus(0, time.Now()) // Success exit - + mockTask.On("Kill", ctx, syscall.SIGTERM).Return(nil).Once() mockTask.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() mockTask.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() }, }, { - name: "stop with timeout, SIGKILL issued", - dryRun: false, + name: "stop with timeout, SIGKILL issued", + dryRun: false, pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName, Clabels: map[string]string{}}, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Once() mockCont.On("Task", ctx, mock.Anything).Return(mockTask, nil).Once() - exitChan := make(chan containerd.ExitStatus) // Unbuffered, Kill will timeout first + exitChan := make(chan containerd.ExitStatus) // Unbuffered, Kill will timeout first sigKillExitChan := make(chan containerd.ExitStatus, 1) // For after SIGKILL - + mockTask.On("Kill", ctx, syscall.SIGTERM).Return(nil).Once() // Wait will be called, but timeout will occur. 
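// Editor's sketch of the escalation the "stop with timeout, SIGKILL issued"
// case exercises (assumed shape, not the patch's implementation): send SIGTERM,
// wait up to the stop timeout on the task's exit channel, then fall back to
// SIGKILL. Function and variable names are illustrative only.
func stopWithTimeout(ctx context.Context, task containerd.Task, timeout time.Duration) error {
	if err := task.Kill(ctx, syscall.SIGTERM); err != nil {
		return err
	}
	exitCh, err := task.Wait(ctx)
	if err != nil {
		return err
	}
	select {
	case <-exitCh:
		return nil // exited within the grace period
	case <-time.After(timeout):
		if err := task.Kill(ctx, syscall.SIGKILL); err != nil {
			return err
		}
		<-exitCh
		return nil
	}
}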
Then Kill(SIGKILL) mockTask.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once().Run(func(args mock.Arguments) { @@ -1759,16 +1808,16 @@ func TestContainerdClient_StopContainer(t *testing.T) { }, }, { - name: "dry run", - dryRun: true, + name: "dry run", + dryRun: true, pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName}, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { // No calls to containerd client expected in dry run }, }, { - name: "container not found", - dryRun: false, + name: "container not found", + dryRun: false, pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName}, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(nil, cerrdefs.ErrNotFound).Once() @@ -1776,8 +1825,8 @@ func TestContainerdClient_StopContainer(t *testing.T) { expectError: "container test-container-id not found: not found", }, { - name: "task not found (already stopped)", - dryRun: false, + name: "task not found (already stopped)", + dryRun: false, pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName}, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Once() @@ -1785,25 +1834,25 @@ func TestContainerdClient_StopContainer(t *testing.T) { }, }, { - name: "custom stop signal from label", - dryRun: false, + name: "custom stop signal from label", + dryRun: false, customSignal: "SIGINT", - pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName, Clabels: map[string]string{signalLabel: "SIGINT"}}, + pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName, Clabels: map[string]string{signalLabel: "SIGINT"}}, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Once() mockCont.On("Task", ctx, mock.Anything).Return(mockTask, nil).Once() - + exitChan := make(chan containerd.ExitStatus, 1) exitChan <- mockExitStatus(0, time.Now()) - + mockTask.On("Kill", ctx, syscall.SIGINT).Return(nil).Once() // Expect SIGINT mockTask.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() mockTask.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() }, }, { - name: "error on task.Kill (SIGTERM)", - dryRun: false, + name: "error on task.Kill (SIGTERM)", + dryRun: false, pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName, Clabels: map[string]string{}}, mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Once() @@ -1826,14 +1875,13 @@ func TestContainerdClient_StopContainer(t *testing.T) { } client := newTestableContainerdClient(mockRootCtClient, "testns") - + // Ensure pumbaCont is not nil for the test run currentPumbaCont := tc.pumbaCont if currentPumbaCont == nil { // Should not happen if test cases are defined correctly t.Fatal("pumbaCont is nil in test case") } - err := client.StopContainer(ctx, currentPumbaCont, defaultTimeout, tc.dryRun) if tc.expectError != "" { @@ -1859,7 +1907,6 @@ func TestContainerdClient_StopContainer(t *testing.T) { } } - func TestContainerdClient_KillContainer(t *testing.T) { ctx := testContext("testns") pumbaContainerID := "kill-id" @@ 
-1966,16 +2013,26 @@ func TestContainerdClient_KillContainer(t *testing.T) { mockRootCtClient.AssertExpectations(t) // Assert underlying mocks only if not dry run and no error was expected at a higher level (like signal parsing) - if !tc.dryRun && - (tc.expectError == "" || - (tc.expectError != "" && !strings.Contains(tc.expectError, "invalid signal")) ) { + if !tc.dryRun && + (tc.expectError == "" || + (tc.expectError != "" && !strings.Contains(tc.expectError, "invalid signal"))) { // If LoadContainer was expected to be called wasLoadCalled := false - for _, call := range mockRootCtClient.Calls { if call.Method == "LoadContainer" { wasLoadCalled = true; break } } + for _, call := range mockRootCtClient.Calls { + if call.Method == "LoadContainer" { + wasLoadCalled = true + break + } + } if wasLoadCalled { mockContainer.AssertExpectations(t) wasTaskCalled := false - for _, call := range mockContainer.Calls { if call.Method == "Task" { wasTaskCalled = true; break } } + for _, call := range mockContainer.Calls { + if call.Method == "Task" { + wasTaskCalled = true + break + } + } if wasTaskCalled { mockTask.AssertExpectations(t) } @@ -2020,14 +2077,14 @@ func TestContainerdClient_RemoveContainer(t *testing.T) { mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Once() mockCont.On("Task", ctx, mock.Anything).Return(mockTask, nil).Once() - + exitChan := make(chan containerd.ExitStatus, 1) exitChan <- mockExitStatus(0, time.Now()) mockTask.On("Kill", ctx, syscall.SIGKILL).Return(nil).Once() mockTask.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() mockTask.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() // Or just (nil, nil) if ExitStatus not checked - + // Expect Delete with WithSnapshotCleanup mockCont.On("Delete", ctx, mock.MatchedBy(func(opts []containerd.DeleteOpts) bool { // Check if WithSnapshotCleanup is present. This is a bit tricky to assert directly. 
@@ -2113,13 +2170,23 @@ func TestContainerdClient_RemoveContainer(t *testing.T) { mockRootCtClient.AssertExpectations(t) if !tc.dryRun && tc.expectError == "" { wasLoadCalled := false - for _, call := range mockRootCtClient.Calls { if call.Method == "LoadContainer" { wasLoadCalled = true; break } } - + for _, call := range mockRootCtClient.Calls { + if call.Method == "LoadContainer" { + wasLoadCalled = true + break + } + } + if wasLoadCalled && (len(mockRootCtClient.Calls[0].ReturnArguments) < 2 || !errors.Is(mockRootCtClient.Calls[0].ReturnArguments.Error(1), cerrdefs.ErrNotFound)) { mockContainer.AssertExpectations(t) if tc.force { // Task related mocks only if force was true wasTaskMethodCalled := false - for _, call := range mockContainer.Calls { if call.Method == "Task" { wasTaskMethodCalled = true; break } } + for _, call := range mockContainer.Calls { + if call.Method == "Task" { + wasTaskMethodCalled = true + break + } + } if wasTaskMethodCalled { mockTask.AssertExpectations(t) } @@ -2196,7 +2263,8 @@ func TestContainerdClient_PauseContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = pumbaContainerID + mockContainer := new(MockContainer) + mockContainer.id = pumbaContainerID mockTask := new(MockTask) tc.mockSetup(mockRootCtClient, mockContainer, mockTask) @@ -2273,7 +2341,8 @@ func TestContainerdClient_UnpauseContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = pumbaContainerID + mockContainer := new(MockContainer) + mockContainer.id = pumbaContainerID mockTask := new(MockTask) tc.mockSetup(mockRootCtClient, mockContainer, mockTask) @@ -2290,11 +2359,21 @@ func TestContainerdClient_UnpauseContainer(t *testing.T) { if !tc.dryRun && tc.expectError == "" { // Assert underlying mocks only if not dry run and no top-level error expected wasLoadCalled := false - for _, call := range mockRootCtClient.Calls { if call.Method == "LoadContainer" { wasLoadCalled = true; break } } + for _, call := range mockRootCtClient.Calls { + if call.Method == "LoadContainer" { + wasLoadCalled = true + break + } + } if wasLoadCalled { mockContainer.AssertExpectations(t) wasTaskCalled := false - for _, call := range mockContainer.Calls { if call.Method == "Task" { wasTaskCalled = true; break } } + for _, call := range mockContainer.Calls { + if call.Method == "Task" { + wasTaskCalled = true + break + } + } if wasTaskCalled { mockTask.AssertExpectations(t) } @@ -2314,7 +2393,6 @@ func TestContainerdClient_ExecContainer(t *testing.T) { defaultOCIProcess := &oci.Process{Cwd: "/", User: oci.User{UID: 0, GID: 0}} defaultSpec := &oci.Spec{Version: "1.0.2", Process: defaultOCIProcess} - tests := []struct { name string dryRun bool @@ -2336,7 +2414,7 @@ func TestContainerdClient_ExecContainer(t *testing.T) { // The execID is generated by uuid, so use mock.AnythingOfType or a matcher mockTask.On("Exec", ctx, mock.AnythingOfType("string"), mock.AnythingOfType("*containerd.ProcessSpec"), mock.Anything).Return(mockProc, nil).Once() mockProc.On("Start", ctx).Return(nil).Once() - + exitChan := make(chan containerd.ExitStatus, 1) exitChan <- mockExitStatus(0, time.Now()) // Successful exit mockProc.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() @@ -2354,7 +2432,7 @@ func TestContainerdClient_ExecContainer(t 
*testing.T) { mockCont.On("Spec", ctx).Return(defaultSpec, nil).Once() mockTask.On("Exec", ctx, mock.AnythingOfType("string"), mock.AnythingOfType("*containerd.ProcessSpec"), mock.Anything).Return(mockProc, nil).Once() mockProc.On("Start", ctx).Return(nil).Once() - + exitChan := make(chan containerd.ExitStatus, 1) exitChan <- mockExitStatus(1, time.Now()) // Failed exit mockProc.On("Wait", ctx).Return((<-chan containerd.ExitStatus)(exitChan), nil).Once() @@ -2366,7 +2444,8 @@ func TestContainerdClient_ExecContainer(t *testing.T) { name: "dry run", dryRun: true, pumbaCont: &Container{Cid: pumbaContainerID, Cname: pumbaContainerName}, - mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask, mockProc *MockProcess) {}, + mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask, mockProc *MockProcess) { + }, }, { name: "task not running", @@ -2384,7 +2463,8 @@ func TestContainerdClient_ExecContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = pumbaContainerID + mockContainer := new(MockContainer) + mockContainer.id = pumbaContainerID mockTask := new(MockTask) mockProcess := new(MockProcess) tc.mockSetup(mockRootCtClient, mockContainer, mockTask, mockProcess) @@ -2461,7 +2541,7 @@ func TestContainerdClient_StartContainer(t *testing.T) { mockCont.On("Task", ctx, mock.Anything).Return(oldTask, nil).Once() oldTask.On("Status", ctx).Return(containerd.Status{Status: containerd.Stopped}, nil).Once() oldTask.On("Delete", ctx).Return(&containerd.ExitStatus{}, nil).Once() // Or (nil,nil) - + mockCont.On("NewTask", ctx, mock.AnythingOfType("cio.Creator"), mock.Anything).Return(newTask, nil).Once() newTask.On("Start", ctx).Return(nil).Once() }, @@ -2473,7 +2553,7 @@ func TestContainerdClient_StartContainer(t *testing.T) { mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, _ *MockTask, newTask *MockTask) { mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Once() mockCont.On("Task", ctx, mock.Anything).Return(nil, cerrdefs.ErrNotFound).Once() // No existing task - + mockCont.On("NewTask", ctx, mock.AnythingOfType("cio.Creator"), mock.Anything).Return(newTask, nil).Once() newTask.On("Start", ctx).Return(nil).Once() }, @@ -2489,8 +2569,9 @@ func TestContainerdClient_StartContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = pumbaContainerID - mockTask := new(MockTask) // Represents existing/old task + mockContainer := new(MockContainer) + mockContainer.id = pumbaContainerID + mockTask := new(MockTask) // Represents existing/old task mockNewTask := new(MockTask) // Represents newly created task tc.mockSetup(mockRootCtClient, mockContainer, mockTask, mockNewTask) @@ -2533,8 +2614,8 @@ func TestContainerdClient_RestartContainer(t *testing.T) { mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockStopTask *MockTask, mockStartTask *MockTask) { // --- Mocks for StopContainer part --- mockRoot.On("LoadContainer", ctx, pumbaContainerID).Return(mockCont, nil).Twice() // Once for stop, once for start - mockCont.On("Task", ctx, mock.Anything).Return(mockStopTask, nil).Once() // For stop - + mockCont.On("Task", ctx, mock.Anything).Return(mockStopTask, nil).Once() // For stop + 
stopExitChan := make(chan containerd.ExitStatus, 1) stopExitChan <- mockExitStatus(0, time.Now()) mockStopTask.On("Kill", ctx, syscall.SIGTERM).Return(nil).Once() @@ -2561,7 +2642,8 @@ func TestContainerdClient_RestartContainer(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = pumbaContainerID + mockContainer := new(MockContainer) + mockContainer.id = pumbaContainerID mockStopTask := new(MockTask) mockStartTask := new(MockTask) tc.mockSetup(mockRootCtClient, mockContainer, mockStopTask, mockStartTask) @@ -2604,12 +2686,12 @@ func TestContainerdClient_StopContainerWithID(t *testing.T) { mockSetup: func(mockRoot *MockRootContainerdClient, mockCont *MockContainer, mockTask *MockTask) { mockRoot.On("LoadContainer", ctx, containerID).Return(mockCont, nil).Once() // For StopByID mockCont.On("Info", ctx, mock.Anything).Return(containers.Container{ID: containerID, Labels: defaultLabels}, nil).Once() - + // Mocks for the subsequent StopContainer call // LoadContainer is called again inside StopContainer, but it's the same mockCont instance. // No need to mock LoadContainer again on mockRoot if it's for the same ID. mockCont.On("Task", ctx, mock.Anything).Return(mockTask, nil).Once() - + exitChan := make(chan containerd.ExitStatus, 1) exitChan <- mockExitStatus(0, time.Now()) mockTask.On("Kill", ctx, syscall.SIGTERM).Return(nil).Once() // Default signal @@ -2640,7 +2722,8 @@ func TestContainerdClient_StopContainerWithID(t *testing.T) { for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { mockRootCtClient := newMockRootContainerdClient() - mockContainer := new(MockContainer); mockContainer.id = containerID + mockContainer := new(MockContainer) + mockContainer.id = containerID mockTask := new(MockTask) tc.mockSetup(mockRootCtClient, mockContainer, mockTask)