abra/pkg/upstream/container/exec.go

package container // https://github.com/docker/cli/blob/master/cli/command/container/exec.go

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
	apiclient "github.com/docker/docker/client"
	"github.com/sirupsen/logrus"
)

// RunExec runs a command in a remote container. The returned io.Writer
// corresponds to the command output.
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string,
	execConfig *types.ExecConfig) (io.Writer, error) {
	ctx := context.Background()

	// We need to check the tty _before_ we do the ContainerExecCreate, because
	// otherwise if we error out we will leak execIDs on the server (and
	// there's no easy way to clean those up). But also in order to make "not
	// exist" errors take precedence we do a dummy inspect first.
	if _, err := client.ContainerInspect(ctx, containerID); err != nil {
		return nil, err
	}

	if !execConfig.Detach {
		if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
			return nil, err
		}
	}

	response, err := client.ContainerExecCreate(ctx, containerID, *execConfig)
	if err != nil {
		return nil, err
	}

	execID := response.ID
	if execID == "" {
		return nil, errors.New("exec ID empty")
	}
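
	// For a detached exec there is no I/O to stream: start it and return.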
	if execConfig.Detach {
		execStartCheck := types.ExecStartCheck{
			Detach: execConfig.Detach,
			Tty:    execConfig.Tty,
		}
		return nil, client.ContainerExecStart(ctx, execID, execStartCheck)
	}

	return interactiveExec(ctx, dockerCli, client, execConfig, execID)
}
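
// interactiveExec attaches the local streams to the created exec instance,
// streams I/O until the command finishes, and reports its exit status.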
func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclient.Client,
	execConfig *types.ExecConfig, execID string) (io.Writer, error) {
	// Interactive exec requested.
	var (
		out, stderr io.Writer
		in          io.ReadCloser
	)

	if execConfig.AttachStdin {
		in = dockerCli.In()
	}
	if execConfig.AttachStdout {
		out = dockerCli.Out()
	}
	if execConfig.AttachStderr {
		if execConfig.Tty {
			stderr = dockerCli.Out()
		} else {
			stderr = dockerCli.Err()
		}
	}

	execStartCheck := types.ExecStartCheck{
		Tty: execConfig.Tty,
	}
	resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck)
	if err != nil {
		return out, err
	}
	defer resp.Close()
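
	// Stream the hijacked connection from a goroutine so that the TTY size
	// monitor below can be set up while I/O is already in flight.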
	errCh := make(chan error, 1)
	go func() {
		defer close(errCh)
		errCh <- func() error {
			streamer := hijackedIOStreamer{
				streams:      dockerCli,
				inputStream:  in,
				outputStream: out,
				errorStream:  stderr,
				resp:         resp,
				tty:          execConfig.Tty,
				detachKeys:   execConfig.DetachKeys,
			}
			return streamer.stream(ctx)
		}()
	}()

	if execConfig.Tty && dockerCli.In().IsTerminal() {
		if err := MonitorTtySize(ctx, client, dockerCli, execID, true); err != nil {
			fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
		}
	}

	if err := <-errCh; err != nil {
		logrus.Debugf("Error hijack: %s", err)
		return out, err
	}

	return out, getExecExitStatus(ctx, client, execID)
}
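
// getExecExitStatus inspects the finished exec instance and converts a
// non-zero exit code into a cli.StatusError.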
func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error {
	resp, err := client.ContainerExecInspect(ctx, execID)
	if err != nil {
		// If we can't connect, then the daemon probably died.
		if !apiclient.IsErrConnectionFailed(err) {
			return err
		}
		return cli.StatusError{StatusCode: -1}
	}

	status := resp.ExitCode
	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}

	return nil
}
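
// exampleRunExec is a minimal usage sketch, assuming the caller already holds
// a command.Cli and an API client; the container name "example_app" and the
// command are hypothetical. It attaches the local terminal and runs `uname -a`
// inside the container.
func exampleRunExec(dockerCli command.Cli, client *apiclient.Client) error {
	execConfig := &types.ExecConfig{
		AttachStdin:  true,
		AttachStdout: true,
		AttachStderr: true,
		Tty:          dockerCli.In().IsTerminal(),
		Cmd:          []string{"uname", "-a"},
	}

	// With streams attached, RunExec blocks until the command exits; a
	// non-zero exit code surfaces as a cli.StatusError.
	_, err := RunExec(dockerCli, client, "example_app", execConfig)
	return err
}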