package container // https://github.com/docker/cli/blob/master/cli/command/container/exec.go

import (
	"context"
	"errors"
	"fmt"
	"io"

	"coopcloud.tech/abra/pkg/log"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types/container"
	apiclient "github.com/docker/docker/client"
)

// RunExec runs a command in a remote container. The returned io.Writer
// corresponds to the command output.
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string,
	execOptions *container.ExecOptions) (io.Writer, error) {
	ctx := context.Background()

	// We need to check the tty _before_ we do the ContainerExecCreate, because
	// otherwise if we error out we will leak execIDs on the server (and
	// there's no easy way to clean those up). But also in order to make "not
	// exist" errors take precedence we do a dummy inspect first.
	if _, err := client.ContainerInspect(ctx, containerID); err != nil {
		return nil, err
	}
	if !execOptions.Detach {
		if err := dockerCli.In().CheckTty(execOptions.AttachStdin, execOptions.Tty); err != nil {
			return nil, err
		}
	}

	response, err := client.ContainerExecCreate(ctx, containerID, *execOptions)
	if err != nil {
		return nil, err
	}

	execID := response.ID
	if execID == "" {
		return nil, errors.New("exec ID empty")
	}

	if execOptions.Detach {
		execStartCheck := container.ExecStartOptions{
			Detach: execOptions.Detach,
			Tty:    execOptions.Tty,
		}
		return nil, client.ContainerExecStart(ctx, execID, execStartCheck)
	}

	return interactiveExec(ctx, dockerCli, client, execOptions, execID)
}

// interactiveExec attaches the local input/output streams to a created exec
// instance and blocks until the hijacked connection closes, then reports the
// command's exit status.
func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclient.Client,
	execOpts *container.ExecOptions, execID string) (io.Writer, error) {
	// Interactive exec requested.
	var (
		out, stderr io.Writer
		in          io.ReadCloser
	)

	if execOpts.AttachStdin {
		in = dockerCli.In()
	}
	if execOpts.AttachStdout {
		out = dockerCli.Out()
	}
	if execOpts.AttachStderr {
		if execOpts.Tty {
			stderr = dockerCli.Out()
		} else {
			stderr = dockerCli.Err()
		}
	}

	execStartCheck := container.ExecStartOptions{
		Tty: execOpts.Tty,
	}
	resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck)
	if err != nil {
		return out, err
	}
	defer resp.Close()

	errCh := make(chan error, 1)

	go func() {
		defer close(errCh)
		errCh <- func() error {
			streamer := hijackedIOStreamer{
				streams:      dockerCli,
				inputStream:  in,
				outputStream: out,
				errorStream:  stderr,
				resp:         resp,
				tty:          execOpts.Tty,
				detachKeys:   execOpts.DetachKeys,
			}

			return streamer.stream(ctx)
		}()
	}()

	if execOpts.Tty && dockerCli.In().IsTerminal() {
		if err := MonitorTtySize(ctx, client, dockerCli, execID, true); err != nil {
			fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
		}
	}

	if err := <-errCh; err != nil {
		log.Debugf("Error hijack: %s", err)
		return out, err
	}

	return out, getExecExitStatus(ctx, client, execID)
}

// getExecExitStatus inspects the finished exec instance and converts a
// non-zero exit code into a cli.StatusError.
func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error {
	resp, err := client.ContainerExecInspect(ctx, execID)
	if err != nil {
		// If we can't connect, then the daemon probably died.
		if !apiclient.IsErrConnectionFailed(err) {
			return err
		}
		return cli.StatusError{StatusCode: -1}
	}

	status := resp.ExitCode
	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}

	return nil
}
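
// Example usage (a minimal sketch, not part of this package's API: the
// dockerCli and apiClient values are assumed to come from the caller's own
// setup, and "mycontainer" is an illustrative container ID):
//
//	execOptions := &container.ExecOptions{
//		AttachStdin:  true,
//		AttachStdout: true,
//		AttachStderr: true,
//		Tty:          true,
//		Cmd:          []string{"sh"},
//	}
//	out, err := RunExec(dockerCli, apiClient, "mycontainer", execOptions)
//	if err != nil {
//		// handle the error; a non-zero exit code surfaces as cli.StatusError
//	}
//	_ = out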