package container // https://github.com/docker/cli/blob/master/cli/command/container/exec.go

import (
	"context"
	"errors"
	"fmt"
	"io"

	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/docker/api/types"
	apiclient "github.com/docker/docker/client"
	"github.com/sirupsen/logrus"
)

// RunExec creates an exec instance in the given container from execConfig and
// starts it, either detached or attached to the caller's terminal, mirroring
// the behaviour of `docker exec`.
func RunExec(dockerCli command.Cli, client *apiclient.Client, containerID string, execConfig *types.ExecConfig) error {
	ctx := context.Background()

	// We need to check the tty _before_ we do the ContainerExecCreate, because
	// otherwise if we error out we will leak execIDs on the server (and
	// there's no easy way to clean those up). But also in order to make "not
	// exist" errors take precedence we do a dummy inspect first.
	if _, err := client.ContainerInspect(ctx, containerID); err != nil {
		return err
	}
	if !execConfig.Detach {
		if err := dockerCli.In().CheckTty(execConfig.AttachStdin, execConfig.Tty); err != nil {
			return err
		}
	}

	response, err := client.ContainerExecCreate(ctx, containerID, *execConfig)
	if err != nil {
		return err
	}

	execID := response.ID
	if execID == "" {
		return errors.New("exec ID empty")
	}

	if execConfig.Detach {
		execStartCheck := types.ExecStartCheck{
			Detach: execConfig.Detach,
			Tty:    execConfig.Tty,
		}
		return client.ContainerExecStart(ctx, execID, execStartCheck)
	}
	return interactiveExec(ctx, dockerCli, client, execConfig, execID)
}

// interactiveExec attaches the CLI's standard streams to the exec instance,
// keeps the TTY size in sync while attached to a terminal, and waits for the
// streamed session to finish before reporting its exit status.
func interactiveExec(ctx context.Context, dockerCli command.Cli, client *apiclient.Client,
	execConfig *types.ExecConfig, execID string) error {
	// Interactive exec requested.
	var (
		out, stderr io.Writer
		in          io.ReadCloser
	)

	if execConfig.AttachStdin {
		in = dockerCli.In()
	}
	if execConfig.AttachStdout {
		out = dockerCli.Out()
	}
	if execConfig.AttachStderr {
		if execConfig.Tty {
			stderr = dockerCli.Out()
		} else {
			stderr = dockerCli.Err()
		}
	}

	execStartCheck := types.ExecStartCheck{
		Tty: execConfig.Tty,
	}
	resp, err := client.ContainerExecAttach(ctx, execID, execStartCheck)
	if err != nil {
		return err
	}
	defer resp.Close()

	errCh := make(chan error, 1)

	go func() {
		defer close(errCh)
		errCh <- func() error {
			streamer := hijackedIOStreamer{
				streams:      dockerCli,
				inputStream:  in,
				outputStream: out,
				errorStream:  stderr,
				resp:         resp,
				tty:          execConfig.Tty,
				detachKeys:   execConfig.DetachKeys,
			}

			return streamer.stream(ctx)
		}()
	}()

	if execConfig.Tty && dockerCli.In().IsTerminal() {
		if err := MonitorTtySize(ctx, client, dockerCli, execID, true); err != nil {
			fmt.Fprintln(dockerCli.Err(), "Error monitoring TTY size:", err)
		}
	}

	if err := <-errCh; err != nil {
		logrus.Debugf("Error hijack: %s", err)
		return err
	}

	return getExecExitStatus(ctx, client, execID)
}

// getExecExitStatus inspects the finished exec instance and converts a
// non-zero exit code, or a lost connection to the daemon, into a
// cli.StatusError so callers can propagate it.
func getExecExitStatus(ctx context.Context, client apiclient.ContainerAPIClient, execID string) error {
	resp, err := client.ContainerExecInspect(ctx, execID)
	if err != nil {
		// If we can't connect, then the daemon probably died.
		if !apiclient.IsErrConnectionFailed(err) {
			return err
		}
		return cli.StatusError{StatusCode: -1}
	}
	status := resp.ExitCode
	if status != 0 {
		return cli.StatusError{StatusCode: status}
	}
	return nil
}
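
// Usage sketch (illustrative, not part of the upstream file): a hypothetical
// caller could start an interactive shell in a running container roughly as
// below, assuming the same import aliases as this file plus
// "github.com/docker/cli/cli/flags". command.NewDockerCli,
// flags.NewClientOptions, apiclient.NewClientWithOpts, apiclient.FromEnv and
// apiclient.WithAPIVersionNegotiation are the standard docker/cli and Docker
// API client constructors; the container ID and command are placeholders.
//
//	func runShell(containerID string) error {
//		dockerCli, err := command.NewDockerCli()
//		if err != nil {
//			return err
//		}
//		if err := dockerCli.Initialize(flags.NewClientOptions()); err != nil {
//			return err
//		}
//
//		cl, err := apiclient.NewClientWithOpts(apiclient.FromEnv, apiclient.WithAPIVersionNegotiation())
//		if err != nil {
//			return err
//		}
//
//		execConfig := &types.ExecConfig{
//			AttachStdin:  true,
//			AttachStdout: true,
//			AttachStderr: true,
//			Tty:          dockerCli.In().IsTerminal(),
//			Cmd:          []string{"sh"},
//		}
//		return RunExec(dockerCli, cl, containerID, execConfig)
//	}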