Compare commits

...

67 Commits

Author SHA1 Message Date
af2647d55b Merge pull request #1634 from thaJeztah/18.09_bump_golang_1.10.8
[18.09] Bump Golang 1.10.8 (CVE-2019-6486)
2019-01-24 14:27:59 +01:00
c71aa11c0a [18.09] Bump Golang 1.10.8 (CVE-2019-6486)
See the milestone for details:
https://github.com/golang/go/issues?q=milestone%3AGo1.10.8+label%3ACherryPickApproved

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2019-01-24 02:07:03 +01:00
336b2a5cac Merge pull request #1580 from thaJeztah/18.09_backport_e2e-invocation-nit
[18.09 backport] e2e updates
2018-12-19 14:20:03 +01:00
c462e06fcd e2e: assign a default value of 0 to DOCKERD_EXPERIMENTAL
Currently running the e2e tests produces a warning/error:

    $ make -f docker.Makefile test-e2e
    «...»
    docker run --rm -v /var/run/docker.sock:/var/run/docker.sock docker-cli-e2e
    ./scripts/test/e2e/run: line 20: test: : integer expression expected

This is from:

    test "${DOCKERD_EXPERIMENTAL:-}" -eq "1" && «...»

Where `${DOCKERD_EXPERIMENTAL:-}` expands to the empty string, resulting in
`test "" -eq "1"` which produces the warning. This error is enough to trigger
the short-circuiting behaviour of `&&` so the result is as expected, but fix
the issue nonetheless by providing a default `0`.

Signed-off-by: Ian Campbell <ijc@docker.com>
(cherry picked from commit 4f483276cf)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-12-17 17:23:04 +01:00
719508a935 connhelper: add e2e
Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
(cherry picked from commit 9b148db87a)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-12-17 17:22:58 +01:00
2fa3aae9ed Merge pull request #1575 from thaJeztah/bump_golang_1.10.6
[18.09] Bump Golang 1.10.6 (CVE-2018-16875)
2018-12-14 20:56:04 +00:00
6c3a10aaed Bump Golang 1.10.6 (CVE-2018-16875)
go1.10.6 (released 2018/12/14)

- crypto/x509: CPU denial of service in chain validation golang/go#29233
- cmd/go: directory traversal in "go get" via curly braces in import paths golang/go#29231
- cmd/go: remote command execution during "go get -u" golang/go#29230

See the Go 1.10.6 milestone on the issue tracker for details:
https://github.com/golang/go/issues?q=milestone%3AGo1.10.6

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-12-14 01:41:33 +01:00
3ee6755815 Merge pull request #1567 from thaJeztah/18.09_backport_fix_panic_on_update
[18.09 backport] Fix panic (npe) when updating service limits/reservations
2018-12-13 10:39:37 +00:00
16349f6e33 Fix panic (npe) when updating service limits/reservations
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 579bb91853)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-12-13 02:22:43 +01:00
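
For reference, the panic came from dereferencing nil `Limits`/`Reservations` pointers on a service that never had them set, and the backported fix initializes those pointers before the update flags write into them. A minimal sketch of that guard pattern, assuming the `swarm` types vendored on this branch (the `taskResources` helper name here is illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/swarm"
)

// taskResources mirrors the fix: lazily initialize Resources and its
// Limits/Reservations so that `service update --limit-cpu ...` does not
// dereference a nil pointer on services created without resource settings.
func taskResources(task *swarm.TaskSpec) *swarm.ResourceRequirements {
	if task.Resources == nil {
		task.Resources = &swarm.ResourceRequirements{}
	}
	if task.Resources.Limits == nil {
		task.Resources.Limits = &swarm.Resources{}
	}
	if task.Resources.Reservations == nil {
		task.Resources.Reservations = &swarm.Resources{}
	}
	return task.Resources
}

func main() {
	// A service spec that never had limits or reservations set.
	spec := swarm.ServiceSpec{
		TaskTemplate: swarm.TaskSpec{ContainerSpec: &swarm.ContainerSpec{}},
	}

	res := taskResources(&spec.TaskTemplate)
	res.Limits.NanoCPUs = 2e9 // safe now; previously a nil-pointer dereference
	fmt.Println(res.Limits.NanoCPUs)
}
```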
2aa77af30f Merge pull request #1554 from thaJeztah/18.09_backport_completion-import--platform
[18.09 backport] Add bash completion for `import --platform`
2018-12-07 13:10:27 -08:00
456c1ce695 Merge pull request #1553 from thaJeztah/18.09_backport_completion-log-driver-local
[18.09 backport] Add bash completion for "local" log driver
2018-12-07 13:10:06 -08:00
bcadc9061c Add bash completion for import --platform
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit e0fe546c37)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-12-07 20:08:35 +01:00
e05745b4a5 Add bash completion for "local" log driver
Ref: https://github.com/moby/moby/pull/37092

Also adds log-opt `compress` to json-file log driver because this was
also added in the referenced PR.

Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit c59038b15c)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-12-07 20:05:08 +01:00
b6ecef353f Merge pull request #1552 from thaJeztah/18.09_backport_fix_filter_panic
[18.09 backport] Fix panic when pruning images with label-filter
2018-12-07 19:29:32 +01:00
e380ddaddf Fix panic when pruning images with label-filter
Before this change:

    docker image prune --force --filter "label=foobar"
    panic: assignment to entry in nil map

    goroutine 1 [running]:
    github.com/docker/cli/vendor/github.com/docker/docker/api/types/filters.Args.Add(...)
    /go/src/github.com/docker/cli/vendor/github.com/docker/docker/api/types/filters/parse.go:167
    github.com/docker/cli/cli/command/image.runPrune(0x1db3a20, 0xc000344cf0, 0x16e0001, 0xc00015e600, 0x4, 0x3, 0xc00024e160, 0xc000545c70, 0x5ab4b5)
    /go/src/github.com/docker/cli/cli/command/image/prune.go:79 +0xbaf
    github.com/docker/cli/cli/command/image.NewPruneCommand.func1(0xc00029ef00, 0xc0004a8180, 0x0, 0x3, 0x0, 0x0)
    /go/src/github.com/docker/cli/cli/command/image/prune.go:32 +0x64
    github.com/docker/cli/vendor/github.com/spf13/cobra.(*Command).execute(0xc00029ef00, 0xc000038210, 0x3, 0x3, 0xc00029ef00, 0xc000038210)
    /go/src/github.com/docker/cli/vendor/github.com/spf13/cobra/command.go:762 +0x473
    github.com/docker/cli/vendor/github.com/spf13/cobra.(*Command).ExecuteC(0xc000127180, 0xc000272770, 0x1836ce0, 0xc000272780)
    /go/src/github.com/docker/cli/vendor/github.com/spf13/cobra/command.go:852 +0x2fd
    github.com/docker/cli/vendor/github.com/spf13/cobra.(*Command).Execute(0xc000127180, 0xc000127180, 0x1d60880)
    /go/src/github.com/docker/cli/vendor/github.com/spf13/cobra/command.go:800 +0x2b
    main.main()
    /go/src/github.com/docker/cli/cmd/docker/docker.go:180 +0xdc

With this patch applied:

    docker image prune --force --filter "label=foobar"
    Total reclaimed space: 0B

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 1e1dd5bca4)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-12-07 17:58:38 +01:00
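
The root cause is that the zero value of `filters.Args` has a nil backing map, so calling `Add` on it panics. A minimal sketch of the behaviour and of the clone-via-JSON workaround used in this patch (names taken from `github.com/docker/docker/api/types/filters`; the standalone program around it is illustrative):

```go
package main

import (
	"fmt"

	"github.com/docker/docker/api/types/filters"
)

// cloneFilter round-trips a filters.Args through its JSON form to obtain an
// independent copy with an initialized backing map, mirroring the temporary
// workaround in this backport until filters.Args grows a Clone() method.
func cloneFilter(args filters.Args) (filters.Args, error) {
	if args.Len() == 0 {
		return filters.NewArgs(), nil
	}
	b, err := args.MarshalJSON()
	if err != nil {
		return filters.Args{}, err
	}
	return filters.FromJSON(string(b))
}

func main() {
	var zero filters.Args // zero value: backing map is nil
	// zero.Add("dangling", "true") // would panic: assignment to entry in nil map

	safe, err := cloneFilter(zero)
	if err != nil {
		panic(err)
	}
	safe.Add("dangling", "true") // fine: map initialized by NewArgs/FromJSON
	fmt.Println(safe.Get("dangling"))
}
```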
12834eeff6 Merge pull request #1542 from thaJeztah/18.09_backport_completion_cli_experimental
[18.09 backport] Add bash completion for experimental CLI commands (manifest)
2018-12-03 13:34:56 -08:00
bb46da9fba Merge pull request #1544 from thaJeztah/18.09_bump_go_to_1.10.5
[18.09] Bump Go to 1.10.5
2018-11-30 14:03:12 -08:00
871d24d3fc Bump Go to 1.10.5
go1.10.5 (released 2018/11/02) includes fixes to the go command, linker,
runtime and the database/sql package. See the milestone on the issue
tracker for details:

List of changes: https://github.com/golang/go/issues?q=milestone%3AGo1.10.5

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-11-30 21:59:54 +01:00
61a9096b8d Merge pull request #1540 from thaJeztah/18.09_backport_fix_flags_in_usage
[18.09 backport] Fix yamldocs outputting `[flags]` in usage output
2018-11-29 13:26:27 -08:00
2ac475cf97 Add bash completion for manifest command family
Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 0fb4256a00)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-11-29 17:15:06 +01:00
2a36695037 Add support for experimental cli features to bash completion
This is needed for implementing bash completion for the `docker manifest`
command family.

Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit a183c952c6)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-11-29 17:15:04 +01:00
dc74fc81f2 Refactor usage of docker version in bash completion
This prepares bash completion for more context sensitivity:

- experimental cli features
- orchestrator specific features

Also renames _daemon_ to _server_ where used in the context of `docker version`,
because the fields there are grouped under _Server_.

Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 564d4da06e)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-11-29 17:15:02 +01:00
7e90635652 Fix yamldocs outputting [flags] in usage output
A similar change was made in the CLI itself, but was not
inherited by the code that generates the YAML docs.

Before this patch is applied:

```
usage: docker container exec [OPTIONS] CONTAINER COMMAND [ARG...] [flags]
```

With this patch applied:

```
usage: docker container exec [OPTIONS] CONTAINER COMMAND [ARG...]
```

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 44d96e9120)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-11-29 15:44:16 +01:00
3f7989903a Merge pull request #1454 from thaJeztah/18.09_backport_defaulttcpschema
[18.09 backport] fixes #1441 set default schema to tcp for docker host
2018-11-27 09:32:51 -08:00
7059d069c3 Merge pull request #1532 from tiborvass/18.09-fix-system-prune-filters
[18.09] prune: move image pruning before build cache pruning
2018-11-26 16:07:21 -08:00
4a4a1f3615 prune: move image pruning before build cache pruning
This is cleaner because running system prune twice in a row
now results in a no-op the second time.

Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit 6c10abb247)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-11-21 22:01:54 +00:00
1274f23252 Merge pull request #1531 from thaJeztah/18.09_backport_builder_docs
[18.09 backport] builder documentation updates
2018-11-21 18:10:29 +01:00
3af1848dda buildkit reference docs
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Signed-off-by: Tibor Vass <tibor@docker.com>
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 83aeb219f0)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-11-21 17:16:30 +01:00
6d91f5d55d Documenting that ENTRYPOINT can empty the value of CMD
Signed-off-by: Brandon Mitchell <git@bmitch.net>
(cherry picked from commit cc316fde55)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-11-21 17:16:24 +01:00
d56948c12c Merge pull request #1530 from thaJeztah/18.09_backport_add_logging_driver_example
[18.09 backport] Update daemon.json example to show that log-opts must be a string
2018-11-21 17:10:02 +01:00
9b3eea87ee Update daemon.json example to show that log-opts must be a string
log-opts are passed to logging drivers as-is, so the daemon is not
aware of what value type each option takes.

For this reason, all options must be provided as a string, even if
they are used as numeric values by the logging driver.

For example, to pass the "max-file" option to the default (json-file)
logging driver, the value has to be passed as a string:

```json
{
  "log-driver": "json-file",
  "log-opts": {
    "max-size": "10m",
    "max-file": "3"
  }
}
```

When passed as a _number_ (`"max-file": 3`), the daemon rejects the
configuration file as invalid and fails to start:

    unable to configure the Docker daemon with file /etc/docker/daemon.json: json: cannot unmarshal number into Go value of type string

This patch adds an example to the daemon.json to show that these values
have to be passed as strings.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit fd33e0d933)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-11-21 15:34:41 +01:00
31c092e155 Merge pull request #1526 from thaJeztah/18.09_backport_completion_fix_service__force
[18.09 backport] Fix bash completion for `service update --force`
2018-11-21 11:38:28 +01:00
046ffa4e87 Fix bash completion for service update --force
- `--force` is not available in `service create`
- `--force` is a boolean option

Signed-off-by: Harald Albers <github@albersweb.de>
(cherry picked from commit 5fa5eb1da6)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-11-20 18:18:33 +01:00
51668a30f2 Merge pull request #1483 from thaJeztah/18.09_backport_docs_fixes
[18.09 backport] various docs fixes
2018-10-26 15:54:26 +01:00
5e7f9d3c84 docs, ssh: unsupport password auth explicitly
The issue with password auth is tracked in #1476 and #1477.

Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
(cherry picked from commit 16b014e062)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:33 +02:00
72ddefbada Documenting example default-ulimit in daemon.json
Signed-off-by: Brandon Mitchell <git@bmitch.net>
(cherry picked from commit 3f4f450941)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:31 +02:00
135aa72476 Updating rmi doc example to specify latest tag
Signed-off-by: Brandon Mitchell <git@bmitch.net>
(cherry picked from commit f913b73c81)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:28 +02:00
7c7fe26a6f Minor typo fix in run documentation
Quick syntax fix!

Signed-off-by: Scott Brenner <scott@scottbrenner.me>
(cherry picked from commit 50143cff12)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:26 +02:00
1df47ffb4d Clarify in docs that docker tag doesn't publish
I am attempting to push a tag to a private repository. The documentation for `docker tag` has an explicit example for how ["To push an image to a private registry"](https://docs.docker.com/engine/reference/commandline/tag/#tag-an-image-referenced-by-name). My colleague clarified that this command does not in fact push anything, so I thought this PR might save some future novice the same confusion.

Signed-off-by: Jake Lambert <jake.lambert@volusion.com>
(cherry picked from commit 4ed484bac4)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:24 +02:00
2e7e529a18 Fix login documentation layout
ddadd3db49 mass-standardized the
formatting, introducing some errors.

This commit fixes errors on `login.md`:
- revert wrong `Logging out` headline
- restore correct level for some headlines (relative to parent
  headline level change)
- re-add `Usage` headlines, with a better name

Also add a `related commands` headline to `login` and `logout`.

Signed-off-by: Thomas Riccardi <thomas@deepomatic.com>
(cherry picked from commit a0e3ec8790)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:22 +02:00
f8f230181e Docs: Add Spaces Around Parentheses Where Needed
Signed-off-by: Alex Mayer <amayer5125@gmail.com>
(cherry picked from commit 2b0fdd0f17)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:19 +02:00
0ee4693953 Typo fix
Signed-off-by: Lihua Tang <lhtang@alauda.io>
(cherry picked from commit ca5e453180)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:17 +02:00
cb4cd04c64 Typo fix: filesytem -> filesystem
Signed-off-by: Rui Cao <ruicao@alauda.io>
(cherry picked from commit 2eb95909ee)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:15 +02:00
d2e771fed6 update docs with current supported features options
Signed-off-by: Anda Xu <anda.xu@docker.com>
(cherry picked from commit d656706678)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-25 15:30:09 +02:00
b8911a3b33 Merge pull request #1481 from thaJeztah/18.09_backport_fix_docker_invalid_subcommand
[18.09 backport] Fix `docker invalid-subcommand` regression
2018-10-25 09:35:28 +02:00
ebe071a9b3 Fix docker invalid-subcommand regression
Starting with a3fe7d62b8,
`docker invalid-subcommand` did not exit with a non-zero status.

Fix #1428

Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
(cherry picked from commit d708cada43)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-24 20:49:10 +02:00
ecb972ab38 Merge pull request #1475 from tiborvass/18.09-fix-build-stream
[18.09] build: update session support detection
2018-10-22 19:24:24 -07:00
4c68a9666f build: update session support detection
Avoid testing for session support in the non-buildkit builder, to support
servers that falsely report as `1.39` compatible.

Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit 3e8c41beb0)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-10-22 23:02:08 +00:00
e245b72381 Merge pull request #1458 from dhiltgen/bump_licensing_lib
[18.09] Bump licensing lib
2018-10-19 13:58:37 -07:00
0ff9e5cd10 Remove e-mail from trial flow
Signed-off-by: Daniel Hiltgen <daniel.hiltgen@docker.com>
2018-10-18 14:56:59 -07:00
8e565d0399 Bump licensing library
Removes the billing profile flow which is now handled on the back-end.

Signed-off-by: Daniel Hiltgen <daniel.hiltgen@docker.com>
2018-10-18 14:44:05 -07:00
8a424333f9 Merge pull request #1455 from thaJeztah/18.09_backport_legacy_drivers
[18.09 backport] deprecate devicemapper and legacy overlay storage drivers
2018-10-18 00:46:50 +02:00
fde819236b Deprecate "devicemapper" storage driver.
The `devicemapper` storage driver is deprecated in favor of `overlay2`, and will
be removed in a future release. Users of the `devicemapper` storage driver are
recommended to migrate to a different storage driver, such as `overlay2`, which
is now the default storage driver.

The `devicemapper` storage driver facilitates running Docker on older (3.x) kernels
that have no support for other storage drivers (such as overlay2, or AUFS).

Now that support for `overlay2` is added to all supported distros (as they are
either on kernel 4.x, or have support for multiple lowerdirs backported), there
is no reason to continue maintenance of the `devicemapper` storage driver.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 662441ba31)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-17 18:04:04 +02:00
aa6314c663 Deprecate legacy overlay storage driver
The `overlay` storage driver is deprecated in favor of the `overlay2` storage
driver, which has all the benefits of `overlay`, without its limitations (excessive
inode consumption). The legacy `overlay` storage driver will be removed in a future
release. Users of the `overlay` storage driver should migrate to the `overlay2`
storage driver.

The legacy `overlay` storage driver allowed using overlayFS-backed filesystems
on pre 4.x kernels. Now that all supported distributions are able to run `overlay2`
(as they are either on kernel 4.x, or have support for multiple lowerdirs
backported), there is no reason to keep maintaining the `overlay` storage driver.

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 8bc2aa45a6)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-17 18:03:57 +02:00
81ee98e861 Merge pull request #1453 from tiborvass/18.09-builder-prune-filter-unused-for
[18.09 backport] builder/prune: rename max-age filter to unused-for in help output
2018-10-17 17:56:13 +02:00
8ae4453d46 add test case TestNewAPIClientFromFlagsForDefaultSchema
Signed-off-by: Lifubang <lifubang@acmcoder.com>
(cherry picked from commit beed8748c0)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-17 17:48:07 +02:00
aeea559129 set default schema to tcp for docker host
Signed-off-by: Lifubang <lifubang@acmcoder.com>
(cherry picked from commit 2431dd1448)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-17 17:47:58 +02:00
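
A minimal sketch of the resulting behaviour, assuming the two-argument `opts.ParseHost` signature used in the diff below (the standalone program around it is illustrative):

```go
package main

import (
	"fmt"

	dopts "github.com/docker/cli/opts"
)

func main() {
	// With this change, a host that omits the scheme (e.g. ":2375" or
	// "myhost:2375") is run through ParseHost, which fills in the default
	// "tcp://" scheme and default host instead of passing the value on unparsed.
	host, err := dopts.ParseHost(false /* no TLS configured */, ":2375")
	if err != nil {
		panic(err)
	}
	fmt.Println(host) // tcp://localhost:2375, as asserted by the new test further down
}
```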
22336b332c Merge pull request #1432 from thaJeztah/18.09_backport_use_string_builder
[18.09] backport using strings.Builder instead of string appending
2018-10-16 19:42:29 -07:00
2961611fda builder/prune: rename max-age filter to unused-for in help output
Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit c9ce6dc656)
Signed-off-by: Tibor Vass <tibor@docker.com>
2018-10-17 00:37:24 +00:00
17adf05188 Merge pull request #1421 from dhiltgen/final_url
[18.09] Update release note link to final location
2018-10-16 19:42:48 +02:00
39f1110308 Merge pull request #1438 from thaJeztah/18.09_backport_hide-buildkit-flags-if-not-enabled
[18.09 backport] builder / buildkit updates
2018-10-11 15:35:44 -07:00
3dfacb55a4 build: only show buildkit-specific flags if buildkit is enabled
Signed-off-by: Tibor Vass <tibor@docker.com>
(cherry picked from commit bbd01fe3df)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-12 00:16:48 +02:00
e942084530 build: add SSH agent socket forwarder (docker build --ssh $SSHMOUNTID=$SSH_AUTH_SOCK)
Unlike `docker build --secret`, `docker build --ssh` allows the build container to
use SSH keys with passphrases.

  $ eval $(ssh-agent)
  $ ssh-add ~/.ssh/id_rsa
  (Input your passphrase here)
  $ docker build --ssh default=$SSH_AUTH_SOCK ...

This feature requires a daemon with the `CapExecMountSSH` build capability (moby/moby#37973).

Currently, the official Dockerfile frontend does not provide the syntax for using the SSH forwarder.

However, the experimental `RUN --mount=type=ssh` syntax can be enabled by using
a Dockerfile frontend image built with `BUILDTAGS="dfrunmount dfssh"`, via the `# syntax =` "shebang".

The Dockerfile for the Dockerfile frontend is available at github.com/moby/buildkit/frontend/dockerfile/cmd/dockerfile-frontend.
The pre-built image is also available as `tonistiigi/dockerfile:ssh20181002`.

An example Dockerfile with `RUN --mount=type=ssh`:

  # syntax = tonistiigi/dockerfile:ssh20181002
  FROM alpine
  RUN apk add --no-cache openssh-client
  RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
  RUN --mount=type=ssh ssh git@gitlab.com | tee /hello
  # "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here

More info is available at moby/buildkit#608 and moby/buildkit#655.

Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
(cherry picked from commit db7399a016)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-12 00:16:42 +02:00
50f529fa47 bump up buildkit
Signed-off-by: Akihiro Suda <suda.akihiro@lab.ntt.co.jp>
(cherry picked from commit 846c38cbd7)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-12 00:16:38 +02:00
b4bee9be75 Bump buildkit and dependencies to 39404586a50d1b9d0fb1c578cf0f4de7bdb7afe5
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
(cherry picked from commit 8cfd24049f)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-12 00:16:29 +02:00
8b0d34a5a1 Using strings.Builder instead of string appending
Signed-off-by: Li Yi <denverdino@gmail.com>
(cherry picked from commit 814ced4b30)
Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2018-10-10 20:21:52 +02:00
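
This change swaps repeated string concatenation for a `strings.Builder`, available since Go 1.10 (which this branch builds with); the builder grows one reusable buffer instead of allocating a new string on every append. A small self-contained before/after sketch, with made-up image IDs:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	deleted := []string{"sha256:aaa", "sha256:bbb", "sha256:ccc"}

	// Before: each += allocates and copies a new string.
	out := "Deleted Images:\n"
	for _, id := range deleted {
		out += fmt.Sprintln("deleted:", id)
	}

	// After: strings.Builder appends into one growing buffer.
	var sb strings.Builder
	sb.WriteString("Deleted Images:\n")
	for _, id := range deleted {
		sb.WriteString("deleted: ")
		sb.WriteString(id)
		sb.WriteByte('\n')
	}

	fmt.Println(out == sb.String()) // true: same output, fewer allocations
}
```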
f93908213a Update release note link to final location
We'll be using a redirect from this URL to the back-end docs system for
hosting release notes. Final location confirmed with Docs team and PM.

Signed-off-by: Daniel Hiltgen <daniel.hiltgen@docker.com>
2018-10-08 16:25:49 -07:00
151 changed files with 20938 additions and 808 deletions

View File

@ -4,7 +4,7 @@ clone_folder: c:\gopath\src\github.com\docker\cli
environment:
GOPATH: c:\gopath
GOVERSION: 1.10.4
GOVERSION: 1.10.8
DEPVERSION: v0.4.1
install:

View File

@ -45,7 +45,7 @@ func NewPruneCommand(dockerCli command.Cli) *cobra.Command {
flags := cmd.Flags()
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'max-age=24h')")
flags.Var(&options.filter, "filter", "Provide filter values (e.g. 'unused-for=24h')")
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
return cmd

View File

@ -8,6 +8,7 @@ import (
"os"
"path/filepath"
"runtime"
"strconv"
"time"
"github.com/docker/cli/cli"
@ -133,6 +134,20 @@ func (cli *DockerCli) ContentTrustEnabled() bool {
return cli.contentTrust
}
// BuildKitEnabled returns whether buildkit is enabled either through a daemon setting
// or otherwise the client-side DOCKER_BUILDKIT environment variable
func BuildKitEnabled(si ServerInfo) (bool, error) {
buildkitEnabled := si.BuildkitVersion == types.BuilderBuildKit
if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
var err error
buildkitEnabled, err = strconv.ParseBool(buildkitEnv)
if err != nil {
return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
}
}
return buildkitEnabled, nil
}
// ManifestStore returns a store for local manifests
func (cli *DockerCli) ManifestStore() manifeststore.Store {
// TODO: support override default location from config file
@ -259,21 +274,17 @@ func NewDockerCli(in io.ReadCloser, out, err io.Writer, isTrusted bool, containe
// NewAPIClientFromFlags creates a new APIClient from command line flags
func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
unparsedHost, err := getUnparsedServerHost(opts.Hosts)
host, err := getServerHost(opts.Hosts, opts.TLSOptions)
if err != nil {
return &client.Client{}, err
}
var clientOpts []func(*client.Client) error
helper, err := connhelper.GetConnectionHelper(unparsedHost)
helper, err := connhelper.GetConnectionHelper(host)
if err != nil {
return &client.Client{}, err
}
if helper == nil {
clientOpts = append(clientOpts, withHTTPClient(opts.TLSOptions))
host, err := dopts.ParseHost(opts.TLSOptions != nil, unparsedHost)
if err != nil {
return &client.Client{}, err
}
clientOpts = append(clientOpts, client.WithHost(host))
} else {
clientOpts = append(clientOpts, func(c *client.Client) error {
@ -306,7 +317,7 @@ func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.
return client.NewClientWithOpts(clientOpts...)
}
func getUnparsedServerHost(hosts []string) (string, error) {
func getServerHost(hosts []string, tlsOptions *tlsconfig.Options) (string, error) {
var host string
switch len(hosts) {
case 0:
@ -316,7 +327,8 @@ func getUnparsedServerHost(hosts []string) (string, error) {
default:
return "", errors.New("Please specify only one -H")
}
return host, nil
return dopts.ParseHost(tlsOptions != nil, host)
}
func withHTTPClient(tlsOpts *tlsconfig.Options) func(*client.Client) error {

View File

@ -43,6 +43,26 @@ func TestNewAPIClientFromFlags(t *testing.T) {
assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion()))
}
func TestNewAPIClientFromFlagsForDefaultSchema(t *testing.T) {
host := ":2375"
opts := &flags.CommonOptions{Hosts: []string{host}}
configFile := &configfile.ConfigFile{
HTTPHeaders: map[string]string{
"My-Header": "Custom-Value",
},
}
apiclient, err := NewAPIClientFromFlags(opts, configFile)
assert.NilError(t, err)
assert.Check(t, is.Equal("tcp://localhost"+host, apiclient.DaemonHost()))
expectedHeaders := map[string]string{
"My-Header": "Custom-Value",
"User-Agent": UserAgent(),
}
assert.Check(t, is.DeepEqual(expectedHeaders, apiclient.(*client.Client).CustomHTTPHeaders()))
assert.Check(t, is.Equal(api.DefaultVersion, apiclient.ClientVersion()))
}
func TestNewAPIClientFromFlagsWithAPIVersionFromEnv(t *testing.T) {
customVersion := "v3.3.3"
defer env.Patch(t, "DOCKER_API_VERSION", customVersion)()

View File

@ -90,7 +90,7 @@ func (c mockLicenseClient) GetHubUserByName(ctx context.Context, username string
func (c mockLicenseClient) VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error) {
return nil, fmt.Errorf("not implemented")
}
func (c mockLicenseClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
func (c mockLicenseClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
return "", fmt.Errorf("not implemented")
}
func (c mockLicenseClient) ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error) {

View File

@ -117,7 +117,7 @@ func processVersions(currentVersion, verType string,
availUpdates = append(availUpdates, clitypes.Update{
Type: verType,
Version: ver.Tag,
Notes: fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, ver.Tag),
Notes: fmt.Sprintf("%s?%s", clitypes.ReleaseNotePrefix, ver.Tag),
})
}
}

View File

@ -1,11 +1,11 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
patch 1.1.3-beta1 https://docs.docker.com/releasenotes/1.1.3-beta1
upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0
upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0
upgrade 2.1.0-beta1 https://docs.docker.com/releasenotes/2.1.0-beta1
downgrade 1.0.1 https://docs.docker.com/releasenotes/1.0.1
downgrade 1.0.2 https://docs.docker.com/releasenotes/1.0.2
downgrade 1.0.3-beta1 https://docs.docker.com/releasenotes/1.0.3-beta1
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
patch 1.1.3-beta1 https://docker.com/engine/releasenotes?1.1.3-beta1
upgrade 1.2.0 https://docker.com/engine/releasenotes?1.2.0
upgrade 2.0.0 https://docker.com/engine/releasenotes?2.0.0
upgrade 2.1.0-beta1 https://docker.com/engine/releasenotes?2.1.0-beta1
downgrade 1.0.1 https://docker.com/engine/releasenotes?1.0.1
downgrade 1.0.2 https://docker.com/engine/releasenotes?1.0.2
downgrade 1.0.3-beta1 https://docker.com/engine/releasenotes?1.0.3-beta1

View File

@ -1,6 +1,6 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0
upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
upgrade 1.2.0 https://docker.com/engine/releasenotes?1.2.0
upgrade 2.0.0 https://docker.com/engine/releasenotes?2.0.0

View File

@ -1,8 +1,8 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
upgrade 1.2.0 https://docs.docker.com/releasenotes/1.2.0
upgrade 2.0.0 https://docs.docker.com/releasenotes/2.0.0
downgrade 1.0.1 https://docs.docker.com/releasenotes/1.0.1
downgrade 1.0.2 https://docs.docker.com/releasenotes/1.0.2
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2
upgrade 1.2.0 https://docker.com/engine/releasenotes?1.2.0
upgrade 2.0.0 https://docker.com/engine/releasenotes?2.0.0
downgrade 1.0.1 https://docker.com/engine/releasenotes?1.0.1
downgrade 1.0.2 https://docker.com/engine/releasenotes?1.0.2

View File

@ -1,4 +1,4 @@
TYPE VERSION NOTES
current 1.1.0
patch 1.1.1 https://docs.docker.com/releasenotes/1.1.1
patch 1.1.2 https://docs.docker.com/releasenotes/1.1.2
patch 1.1.1 https://docker.com/engine/releasenotes?1.1.1
patch 1.1.2 https://docker.com/engine/releasenotes?1.1.2

View File

@ -13,7 +13,6 @@ import (
"path/filepath"
"regexp"
"runtime"
"strconv"
"strings"
"github.com/docker/cli/cli"
@ -73,6 +72,7 @@ type buildOptions struct {
platform string
untrusted bool
secrets []string
ssh []string
}
// dockerfileFromStdin returns true when the user specified that the Dockerfile
@ -136,6 +136,8 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
flags.BoolVar(&options.pull, "pull", false, "Always attempt to pull a newer version of the image")
flags.StringSliceVar(&options.cacheFrom, "cache-from", []string{}, "Images to consider as cache sources")
flags.BoolVar(&options.compress, "compress", false, "Compress the build context using gzip")
flags.SetAnnotation("compress", "no-buildkit", nil)
flags.StringSliceVar(&options.securityOpt, "security-opt", []string{}, "Security options")
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")
flags.SetAnnotation("network", "version", []string{"1.25"})
@ -153,11 +155,18 @@ func NewBuildCommand(dockerCli command.Cli) *cobra.Command {
flags.BoolVar(&options.stream, "stream", false, "Stream attaches to server to negotiate build context")
flags.SetAnnotation("stream", "experimental", nil)
flags.SetAnnotation("stream", "version", []string{"1.31"})
flags.SetAnnotation("stream", "no-buildkit", nil)
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (only if BuildKit enabled) (auto, plain, tty). Use plain to show container output")
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (auto, plain, tty). Use plain to show container output")
flags.SetAnnotation("progress", "buildkit", nil)
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (only if BuildKit enabled): id=mysecret,src=/local/secret")
flags.SetAnnotation("secret", "version", []string{"1.39"})
flags.SetAnnotation("secret", "buildkit", nil)
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|<id>[=<socket>|<key>[,<key>]])")
flags.SetAnnotation("ssh", "version", []string{"1.39"})
flags.SetAnnotation("ssh", "buildkit", nil)
return cmd
}
@ -179,22 +188,17 @@ func (out *lastProgressOutput) WriteProgress(prog progress.Progress) error {
// nolint: gocyclo
func runBuild(dockerCli command.Cli, options buildOptions) error {
if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
enableBuildkit, err := strconv.ParseBool(buildkitEnv)
if err != nil {
return errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
}
if enableBuildkit {
return runBuildBuildKit(dockerCli, options)
}
} else if dockerCli.ServerInfo().BuildkitVersion == types.BuilderBuildKit {
buildkitEnabled, err := command.BuildKitEnabled(dockerCli.ServerInfo())
if err != nil {
return err
}
if buildkitEnabled {
return runBuildBuildKit(dockerCli, options)
}
var (
buildCtx io.ReadCloser
dockerfileCtx io.ReadCloser
err error
contextDir string
tempDir string
relDockerfile string
@ -346,7 +350,7 @@ func runBuild(dockerCli command.Cli, options buildOptions) error {
buildCtx = dockerfileCtx
}
s, err := trySession(dockerCli, contextDir)
s, err := trySession(dockerCli, contextDir, true)
if err != nil {
return err
}

View File

@ -27,10 +27,11 @@ import (
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/session/filesync"
"github.com/moby/buildkit/session/secrets/secretsprovider"
"github.com/moby/buildkit/session/sshforward/sshprovider"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
fsutiltypes "github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
)
@ -42,7 +43,7 @@ var errDockerfileConflict = errors.New("ambiguous Dockerfile source: both stdin
func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
ctx := appcontext.Context()
s, err := trySession(dockerCli, options.context)
s, err := trySession(dockerCli, options.context, false)
if err != nil {
return err
}
@ -138,6 +139,13 @@ func runBuildBuildKit(dockerCli command.Cli, options buildOptions) error {
}
s.Allow(sp)
}
if len(options.ssh) > 0 {
sshp, err := parseSSHSpecs(options.ssh)
if err != nil {
return errors.Wrapf(err, "could not parse ssh: %v", options.ssh)
}
s.Allow(sshp)
}
eg, ctx := errgroup.WithContext(ctx)
@ -291,7 +299,7 @@ func doBuild(ctx context.Context, eg *errgroup.Group, dockerCli command.Cli, opt
return err
}
func resetUIDAndGID(s *fsutil.Stat) bool {
func resetUIDAndGID(s *fsutiltypes.Stat) bool {
s.Uid = 0
s.Gid = 0
return true
@ -408,3 +416,26 @@ func parseSecret(value string) (*secretsprovider.FileSource, error) {
}
return &fs, nil
}
func parseSSHSpecs(sl []string) (session.Attachable, error) {
configs := make([]sshprovider.AgentConfig, 0, len(sl))
for _, v := range sl {
c, err := parseSSH(v)
if err != nil {
return nil, err
}
configs = append(configs, *c)
}
return sshprovider.NewSSHAgentProvider(configs)
}
func parseSSH(value string) (*sshprovider.AgentConfig, error) {
parts := strings.SplitN(value, "=", 2)
cfg := sshprovider.AgentConfig{
ID: parts[0],
}
if len(parts) > 1 {
cfg.Paths = strings.Split(parts[1], ",")
}
return &cfg, nil
}

View File

@ -27,16 +27,16 @@ import (
const clientSessionRemote = "client-session"
func isSessionSupported(dockerCli command.Cli) bool {
if versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.39") {
func isSessionSupported(dockerCli command.Cli, forStream bool) bool {
if !forStream && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.39") {
return true
}
return dockerCli.ServerInfo().HasExperimental && versions.GreaterThanOrEqualTo(dockerCli.Client().ClientVersion(), "1.31")
}
func trySession(dockerCli command.Cli, contextDir string) (*session.Session, error) {
func trySession(dockerCli command.Cli, contextDir string, forStream bool) (*session.Session, error) {
var s *session.Session
if isSessionSupported(dockerCli) {
if isSessionSupported(dockerCli, forStream) {
sharedKey, err := getBuildSharedKey(contextDir)
if err != nil {
return nil, errors.Wrap(err, "failed to get build shared key")

View File

@ -3,11 +3,14 @@ package image
import (
"context"
"fmt"
"strings"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types/filters"
units "github.com/docker/go-units"
"github.com/pkg/errors"
"github.com/spf13/cobra"
)
@ -54,8 +57,24 @@ Are you sure you want to continue?`
Are you sure you want to continue?`
)
// cloneFilter is a temporary workaround that uses existing public APIs from the filters package to clone a filter.
// TODO(tiborvass): remove this once filters.Args.Clone() is added.
func cloneFilter(args filters.Args) (newArgs filters.Args, err error) {
if args.Len() == 0 {
return filters.NewArgs(), nil
}
b, err := args.MarshalJSON()
if err != nil {
return newArgs, err
}
return filters.FromJSON(string(b))
}
func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint64, output string, err error) {
pruneFilters := options.filter.Value()
pruneFilters, err := cloneFilter(options.filter.Value())
if err != nil {
return 0, "", errors.Wrap(err, "could not copy filter in image prune")
}
pruneFilters.Add("dangling", fmt.Sprintf("%v", !options.all))
pruneFilters = command.PruneFilters(dockerCli, pruneFilters)
@ -73,14 +92,20 @@ func runPrune(dockerCli command.Cli, options pruneOptions) (spaceReclaimed uint6
}
if len(report.ImagesDeleted) > 0 {
output = "Deleted Images:\n"
var sb strings.Builder
sb.WriteString("Deleted Images:\n")
for _, st := range report.ImagesDeleted {
if st.Untagged != "" {
output += fmt.Sprintln("untagged:", st.Untagged)
sb.WriteString("untagged: ")
sb.WriteString(st.Untagged)
sb.WriteByte('\n')
} else {
output += fmt.Sprintln("deleted:", st.Deleted)
sb.WriteString("deleted: ")
sb.WriteString(st.Deleted)
sb.WriteByte('\n')
}
}
output = sb.String()
spaceReclaimed = report.SpaceReclaimed
}

View File

@ -70,6 +70,14 @@ func TestNewPruneCommandSuccess(t *testing.T) {
}, nil
},
},
{
name: "label-filter",
args: []string{"--force", "--filter", "label=foobar"},
imagesPruneFunc: func(pruneFilter filters.Args) (types.ImagesPruneReport, error) {
assert.Check(t, is.Equal("foobar", pruneFilter.Get("label")[0]))
return types.ImagesPruneReport{}, nil
},
},
{
name: "force-untagged",
args: []string{"--force"},

View File

@ -0,0 +1 @@
Total reclaimed space: 0B

View File

@ -18,6 +18,7 @@ type osArch struct {
// list of valid os/arch values (see "Optional Environment Variables" section
// of https://golang.org/doc/install/source
// Added linux/s390x as we know System z support already exists
// Keep in sync with _docker_manifest_annotate in contrib/completion/bash/docker
var validOSArches = map[osArch]bool{
{os: "darwin", arch: "386"}: true,
{os: "darwin", arch: "amd64"}: true,

View File

@ -302,6 +302,12 @@ func updateService(ctx context.Context, apiClient client.NetworkAPIClient, flags
if task.Resources == nil {
task.Resources = &swarm.ResourceRequirements{}
}
if task.Resources.Limits == nil {
task.Resources.Limits = &swarm.Resources{}
}
if task.Resources.Reservations == nil {
task.Resources.Reservations = &swarm.Resources{}
}
return task.Resources
}

View File

@ -617,6 +617,38 @@ func TestUpdateIsolationValid(t *testing.T) {
// and that values are not updated are not reset to their default value
func TestUpdateLimitsReservations(t *testing.T) {
spec := swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{
ContainerSpec: &swarm.ContainerSpec{},
},
}
// test that updating works if the service did not previously
// have limits set (https://github.com/moby/moby/issues/38363)
flags := newUpdateCommand(nil).Flags()
err := flags.Set(flagLimitCPU, "2")
assert.NilError(t, err)
err = flags.Set(flagLimitMemory, "200M")
assert.NilError(t, err)
err = updateService(context.Background(), nil, flags, &spec)
assert.NilError(t, err)
spec = swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{
ContainerSpec: &swarm.ContainerSpec{},
},
}
// test that updating works if the service did not previously
// have reservations set (https://github.com/moby/moby/issues/38363)
flags = newUpdateCommand(nil).Flags()
err = flags.Set(flagReserveCPU, "2")
assert.NilError(t, err)
err = flags.Set(flagReserveMemory, "200M")
assert.NilError(t, err)
err = updateService(context.Background(), nil, flags, &spec)
assert.NilError(t, err)
spec = swarm.ServiceSpec{
TaskTemplate: swarm.TaskSpec{
ContainerSpec: &swarm.ContainerSpec{},
Resources: &swarm.ResourceRequirements{
@ -632,8 +664,8 @@ func TestUpdateLimitsReservations(t *testing.T) {
},
}
flags := newUpdateCommand(nil).Flags()
err := flags.Set(flagLimitCPU, "2")
flags = newUpdateCommand(nil).Flags()
err = flags.Set(flagLimitCPU, "2")
assert.NilError(t, err)
err = flags.Set(flagReserveCPU, "2")
assert.NilError(t, err)

View File

@ -73,11 +73,10 @@ func runPrune(dockerCli command.Cli, options pruneOptions) error {
if options.pruneVolumes {
pruneFuncs = append(pruneFuncs, volume.RunPrune)
}
pruneFuncs = append(pruneFuncs, image.RunPrune)
if options.pruneBuildCache {
pruneFuncs = append(pruneFuncs, builder.CachePrune)
}
// FIXME: modify image.RunPrune to not modify options.filter, otherwise this has to be last in the list.
pruneFuncs = append(pruneFuncs, image.RunPrune)
var spaceReclaimed uint64
for _, pruneFn := range pruneFuncs {

View File

@ -148,7 +148,7 @@ func TestAddStageSigners(t *testing.T) {
assert.NilError(t, err)
changeList := cl.List()
assert.Check(t, is.Len(changeList, 4))
// ordering is determinstic:
// ordering is deterministic:
// first change is for targets/user key creation
newSignerKeyChange := changeList[0]

View File

@ -33,6 +33,9 @@ func newDockerCommand(dockerCli *command.DockerCli) *cobra.Command {
SilenceErrors: true,
TraverseChildren: true,
Args: noArgs,
RunE: func(cmd *cobra.Command, args []string) error {
return command.ShowHelp(dockerCli.Err())(cmd, args)
},
PersistentPreRunE: func(cmd *cobra.Command, args []string) error {
// flags must be the top-level command flags, not cmd.Flags()
opts.Common.SetDefaultOptions(flags)
@ -100,8 +103,10 @@ func setHelpFunc(dockerCli *command.DockerCli, cmd *cobra.Command, flags *pflag.
ccmd.Println(err)
return
}
hideUnsupportedFeatures(ccmd, dockerCli)
if err := hideUnsupportedFeatures(ccmd, dockerCli); err != nil {
ccmd.Println(err)
return
}
defaultHelpFunc(ccmd, args)
})
}
@ -235,15 +240,21 @@ func hideFeatureSubCommand(subcmd *cobra.Command, hasFeature bool, annotation st
}
}
func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) {
func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) error {
clientVersion := details.Client().ClientVersion()
osType := details.ServerInfo().OSType
hasExperimental := details.ServerInfo().HasExperimental
hasExperimentalCLI := details.ClientInfo().HasExperimental
hasBuildKit, err := command.BuildKitEnabled(details.ServerInfo())
if err != nil {
return err
}
cmd.Flags().VisitAll(func(f *pflag.Flag) {
hideFeatureFlag(f, hasExperimental, "experimental")
hideFeatureFlag(f, hasExperimentalCLI, "experimentalCLI")
hideFeatureFlag(f, hasBuildKit, "buildkit")
hideFeatureFlag(f, !hasBuildKit, "no-buildkit")
// hide flags not supported by the server
if !isOSTypeSupported(f, osType) || !isVersionSupported(f, clientVersion) {
f.Hidden = true
@ -259,6 +270,8 @@ func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) {
for _, subcmd := range cmd.Commands() {
hideFeatureSubCommand(subcmd, hasExperimental, "experimental")
hideFeatureSubCommand(subcmd, hasExperimentalCLI, "experimentalCLI")
hideFeatureSubCommand(subcmd, hasBuildKit, "buildkit")
hideFeatureSubCommand(subcmd, !hasBuildKit, "no-buildkit")
// hide subcommands not supported by the server
if subcmdVersion, ok := subcmd.Annotations["version"]; ok && versions.LessThan(clientVersion, subcmdVersion) {
subcmd.Hidden = true
@ -267,6 +280,7 @@ func hideUnsupportedFeatures(cmd *cobra.Command, details versionDetails) {
subcmd.Hidden = true
}
}
return nil
}
// Checks if a command or one of its ancestors is in the list
@ -313,6 +327,7 @@ func areFlagsSupported(cmd *cobra.Command, details versionDetails) error {
if _, ok := f.Annotations["experimentalCLI"]; ok && !hasExperimentalCLI {
errs = append(errs, fmt.Sprintf("\"--%s\" is on a Docker cli with experimental cli features enabled", f.Name))
}
// buildkit-specific flags are noop when buildkit is not enabled, so we do not add an error in that case
}
})
if len(errs) > 0 {

View File

@ -1,6 +1,7 @@
package main
import (
"bytes"
"io/ioutil"
"os"
"testing"
@ -31,3 +32,20 @@ func TestExitStatusForInvalidSubcommandWithHelpFlag(t *testing.T) {
err := cmd.Execute()
assert.Error(t, err, "unknown help topic: invalid")
}
func TestExitStatusForInvalidSubcommand(t *testing.T) {
discard := ioutil.Discard
cmd := newDockerCommand(command.NewDockerCli(os.Stdin, discard, discard, false, nil))
cmd.SetArgs([]string{"invalid"})
err := cmd.Execute()
assert.Check(t, is.ErrorContains(err, "docker: 'invalid' is not a docker command."))
}
func TestVersion(t *testing.T) {
var b bytes.Buffer
cmd := newDockerCommand(command.NewDockerCli(os.Stdin, &b, &b, false, nil))
cmd.SetArgs([]string{"--version"})
err := cmd.Execute()
assert.NilError(t, err)
assert.Check(t, is.Contains(b.String(), "Docker version"))
}

View File

@ -563,23 +563,39 @@ __docker_append_to_completions() {
COMPREPLY=( ${COMPREPLY[@]/%/"$1"} )
}
# __docker_daemon_is_experimental tests whether the currently configured Docker
# daemon runs in experimental mode. If so, the function exits with 0 (true).
# Otherwise, or if the result cannot be determined, the exit value is 1 (false).
__docker_daemon_is_experimental() {
[ "$(__docker_q version -f '{{.Server.Experimental}}')" = "true" ]
# __docker_fetch_info fetches information about the configured Docker server and updates
# several variables with the results.
# The result is cached for the duration of one invocation of bash completion.
__docker_fetch_info() {
if [ -z "$info_fetched" ] ; then
read -r client_experimental server_experimental server_os < <(__docker_q version -f '{{.Client.Experimental}} {{.Server.Experimental}} {{.Server.Os}}')
info_fetched=true
fi
}
# __docker_daemon_os_is tests whether the currently configured Docker daemon runs
# __docker_client_is_experimental tests whether the Docker cli is configured to support
# experimental features. If so, the function exits with 0 (true).
# Otherwise, or if the result cannot be determined, the exit value is 1 (false).
__docker_client_is_experimental() {
__docker_fetch_info
[ "$client_experimental" = "true" ]
}
# __docker_server_is_experimental tests whether the currently configured Docker
# server runs in experimental mode. If so, the function exits with 0 (true).
# Otherwise, or if the result cannot be determined, the exit value is 1 (false).
__docker_server_is_experimental() {
__docker_fetch_info
[ "$server_experimental" = "true" ]
}
# __docker_server_os_is tests whether the currently configured Docker server runs
# on the operating system passed in as the first argument.
# It does so by querying the daemon for its OS. The result is cached for the duration
# of one invocation of bash completion so that this function can be used to test for
# several different operating systems without additional costs.
# Known operating systems: linux, windows.
__docker_daemon_os_is() {
__docker_server_os_is() {
local expected_os="$1"
local actual_os=${daemon_os=$(__docker_q version -f '{{.Server.Os}}')}
[ "$actual_os" = "$expected_os" ]
__docker_fetch_info
[ "$server_os" = "$expected_os" ]
}
# __docker_stack_orchestrator_is tests whether the client is configured to use
@ -865,6 +881,7 @@ __docker_complete_log_drivers() {
gelf
journald
json-file
local
logentries
none
splunk
@ -888,7 +905,8 @@ __docker_complete_log_options() {
local gcplogs_options="$common_options1 $common_options2 gcp-log-cmd gcp-meta-id gcp-meta-name gcp-meta-zone gcp-project"
local gelf_options="$common_options1 $common_options2 gelf-address gelf-compression-level gelf-compression-type gelf-tcp-max-reconnect gelf-tcp-reconnect-delay tag"
local journald_options="$common_options1 $common_options2 tag"
local json_file_options="$common_options1 $common_options2 max-file max-size"
local json_file_options="$common_options1 $common_options2 compress max-file max-size"
local local_options="$common_options1 compress max-file max-size"
local logentries_options="$common_options1 $common_options2 line-only logentries-token tag"
local splunk_options="$common_options1 $common_options2 splunk-caname splunk-capath splunk-format splunk-gzip splunk-gzip-level splunk-index splunk-insecureskipverify splunk-source splunk-sourcetype splunk-token splunk-url splunk-verify-connection tag"
local syslog_options="$common_options1 $common_options2 syslog-address syslog-facility syslog-format syslog-tls-ca-cert syslog-tls-cert syslog-tls-key syslog-tls-skip-verify tag"
@ -917,6 +935,9 @@ __docker_complete_log_options() {
json-file)
COMPREPLY=( $( compgen -W "$json_file_options" -S = -- "$cur" ) )
;;
local)
COMPREPLY=( $( compgen -W "$local_options" -S = -- "$cur" ) )
;;
logentries)
COMPREPLY=( $( compgen -W "$logentries_options" -S = -- "$cur" ) )
;;
@ -946,7 +967,7 @@ __docker_complete_log_driver_options() {
__docker_nospace
return
;;
fluentd-async-connect)
compress|fluentd-async-connect)
COMPREPLY=( $( compgen -W "false true" -- "${cur##*=}" ) )
return
;;
@ -1128,7 +1149,8 @@ _docker_docker() {
*)
local counter=$( __docker_pos_first_nonflag "$(__docker_to_extglob "$global_options_with_args")" )
if [ "$cword" -eq "$counter" ]; then
__docker_daemon_is_experimental && commands+=(${experimental_commands[*]})
__docker_client_is_experimental && commands+=(${experimental_client_commands[*]})
__docker_server_is_experimental && commands+=(${experimental_server_commands[*]})
COMPREPLY=( $( compgen -W "${commands[*]} help" -- "$cur" ) )
fi
;;
@ -1837,14 +1859,14 @@ _docker_container_run_and_create() {
--volume -v
--workdir -w
"
__docker_daemon_os_is windows && options_with_args+="
__docker_server_os_is windows && options_with_args+="
--cpu-count
--cpu-percent
--io-maxbandwidth
--io-maxiops
--isolation
"
__docker_daemon_is_experimental && options_with_args+="
__docker_server_is_experimental && options_with_args+="
--platform
"
@ -1960,7 +1982,7 @@ _docker_container_run_and_create() {
return
;;
--isolation)
if __docker_daemon_os_is windows ; then
if __docker_server_os_is windows ; then
__docker_complete_isolation
return
fi
@ -2071,12 +2093,12 @@ _docker_container_start() {
__docker_complete_detach_keys && return
case "$prev" in
--checkpoint)
if __docker_daemon_is_experimental ; then
if __docker_server_is_experimental ; then
return
fi
;;
--checkpoint-dir)
if __docker_daemon_is_experimental ; then
if __docker_server_is_experimental ; then
_filedir -d
return
fi
@ -2086,7 +2108,7 @@ _docker_container_start() {
case "$cur" in
-*)
local options="--attach -a --detach-keys --help --interactive -i"
__docker_daemon_is_experimental && options+=" --checkpoint --checkpoint-dir"
__docker_server_is_experimental && options+=" --checkpoint --checkpoint-dir"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
*)
@ -2449,7 +2471,7 @@ _docker_daemon() {
}
_docker_deploy() {
__docker_daemon_is_experimental && _docker_stack_deploy
__docker_server_is_experimental && _docker_stack_deploy
}
_docker_diff() {
@ -2535,7 +2557,7 @@ _docker_image_build() {
--target
--ulimit
"
__docker_daemon_os_is windows && options_with_args+="
__docker_server_os_is windows && options_with_args+="
--isolation
"
@ -2549,7 +2571,7 @@ _docker_image_build() {
--quiet -q
--rm
"
if __docker_daemon_is_experimental ; then
if __docker_server_is_experimental ; then
options_with_args+="
--platform
"
@ -2584,7 +2606,7 @@ _docker_image_build() {
return
;;
--isolation)
if __docker_daemon_os_is windows ; then
if __docker_server_os_is windows ; then
__docker_complete_isolation
return
fi
@ -2664,14 +2686,16 @@ _docker_image_images() {
_docker_image_import() {
case "$prev" in
--change|-c|--message|-m)
--change|-c|--message|-m|--platform)
return
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--change -c --help --message -m" -- "$cur" ) )
local options="--change -c --help --message -m"
__docker_server_is_experimental && options+=" --platform"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
*)
local counter=$(__docker_pos_first_nonflag '--change|-c|--message|-m')
@ -2779,7 +2803,7 @@ _docker_image_pull() {
case "$cur" in
-*)
local options="--all-tags -a --disable-content-trust=false --help"
__docker_daemon_is_experimental && options+=" --platform"
__docker_server_is_experimental && options+=" --platform"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
;;
@ -3395,7 +3419,6 @@ _docker_service_update_and_create() {
local options_with_args="
--endpoint-mode
--entrypoint
--force
--health-cmd
--health-interval
--health-retries
@ -3431,7 +3454,7 @@ _docker_service_update_and_create() {
--user -u
--workdir -w
"
__docker_daemon_os_is windows && options_with_args+="
__docker_server_os_is windows && options_with_args+="
--credential-spec
"
@ -3520,6 +3543,10 @@ _docker_service_update_and_create() {
--secret-rm
"
boolean_options="$boolean_options
--force
"
case "$prev" in
--env-rm)
COMPREPLY=( $( compgen -e -- "$cur" ) )
@ -3817,6 +3844,109 @@ _docker_swarm_update() {
esac
}
_docker_manifest() {
local subcommands="
annotate
create
inspect
push
"
__docker_subcommands "$subcommands" && return
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--help" -- "$cur" ) )
;;
*)
COMPREPLY=( $( compgen -W "$subcommands" -- "$cur" ) )
;;
esac
}
_docker_manifest_annotate() {
case "$prev" in
--arch)
COMPREPLY=( $( compgen -W "
386
amd64
arm
arm64
mips64
mips64le
ppc64le
s390x" -- "$cur" ) )
return
;;
--os)
COMPREPLY=( $( compgen -W "
darwin
dragonfly
freebsd
linux
netbsd
openbsd
plan9
solaris
windows" -- "$cur" ) )
return
;;
--os-features|--variant)
return
;;
esac
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--arch --help --os --os-features --variant" -- "$cur" ) )
;;
*)
local counter=$( __docker_pos_first_nonflag "--arch|--os|--os-features|--variant" )
if [ "$cword" -eq "$counter" ] || [ "$cword" -eq "$((counter + 1))" ]; then
__docker_complete_images --force-tag --id
fi
;;
esac
}
_docker_manifest_create() {
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--amend -a --help --insecure" -- "$cur" ) )
;;
*)
__docker_complete_images --force-tag --id
;;
esac
}
_docker_manifest_inspect() {
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--help --insecure --verbose -v" -- "$cur" ) )
;;
*)
local counter=$( __docker_pos_first_nonflag )
if [ "$cword" -eq "$counter" ] || [ "$cword" -eq "$((counter + 1))" ]; then
__docker_complete_images --force-tag --id
fi
;;
esac
}
_docker_manifest_push() {
case "$cur" in
-*)
COMPREPLY=( $( compgen -W "--help --insecure --purge -p" -- "$cur" ) )
;;
*)
local counter=$( __docker_pos_first_nonflag )
if [ "$cword" -eq "$counter" ]; then
__docker_complete_images --force-tag --id
fi
;;
esac
}
_docker_node() {
local subcommands="
demote
@ -4451,7 +4581,7 @@ _docker_stack_deploy() {
case "$cur" in
-*)
local options="--compose-file -c --help --orchestrator"
__docker_daemon_is_experimental && __docker_stack_orchestrator_is swarm && options+=" --bundle-file"
__docker_server_is_experimental && __docker_stack_orchestrator_is swarm && options+=" --bundle-file"
__docker_stack_orchestrator_is kubernetes && options+=" --kubeconfig --namespace"
__docker_stack_orchestrator_is swarm && options+=" --prune --resolve-image --with-registry-auth"
COMPREPLY=( $( compgen -W "$options" -- "$cur" ) )
@ -5074,7 +5204,11 @@ _docker() {
wait
)
local experimental_commands=(
local experimental_client_commands=(
manifest
)
local experimental_server_commands=(
checkpoint
deploy
)
@ -5098,10 +5232,12 @@ _docker() {
--tlskey
"
local host config daemon_os
# variables to cache server info, populated on demand for performance reasons
local info_fetched server_experimental server_os
# variables to cache client info, populated on demand for performance reasons
local stack_orchestrator_is_kubernetes stack_orchestrator_is_swarm
local client_experimental stack_orchestrator_is_kubernetes stack_orchestrator_is_swarm
local host config
COMPREPLY=()
local cur prev words cword

View File

@ -105,7 +105,7 @@ shellcheck: build_shell_validate_image ## run shellcheck validation
docker run -ti --rm $(ENVVARS) $(MOUNTS) $(VALIDATE_IMAGE_NAME) make shellcheck
.PHONY: test-e2e ## run e2e tests
test-e2e: test-e2e-non-experimental test-e2e-experimental
test-e2e: test-e2e-non-experimental test-e2e-experimental test-e2e-connhelper-ssh
.PHONY: test-e2e-experimental
test-e2e-experimental: build_e2e_image
@ -115,6 +115,10 @@ test-e2e-experimental: build_e2e_image
test-e2e-non-experimental: build_e2e_image
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock $(E2E_IMAGE_NAME)
.PHONY: test-e2e-connhelper-ssh
test-e2e-connhelper-ssh: build_e2e_image
docker run -e TEST_CONNHELPER=ssh -e DOCKERD_EXPERIMENTAL=1 --rm -v /var/run/docker.sock:/var/run/docker.sock $(E2E_IMAGE_NAME)
.PHONY: help
help: ## print this help
@awk 'BEGIN {FS = ":.*?## "} /^[a-zA-Z_-]+:.*?## / {sub("\\\\n",sprintf("\n%22c"," "), $$2);printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)

View File

@ -1,4 +1,4 @@
FROM golang:1.10.4-alpine
FROM golang:1.10.8-alpine
RUN apk add -U git bash coreutils gcc musl-dev

View File

@ -1,3 +1,3 @@
FROM dockercore/golang-cross:1.10.4@sha256:55c7b933ac944f4922b673b4d4340d1a0404f3c324bd0b3f13a4326c427b1f2a
FROM dockercore/golang-cross:1.10.8@sha256:a93210f55a8137b4aa4b9f033ac7a80b66ab6337e98e7afb62abe93b4ad73cad
ENV DISABLE_WARN_OUTSIDE_CONTAINER=1
WORKDIR /go/src/github.com/docker/cli

View File

@ -1,5 +1,5 @@
FROM golang:1.10.4-alpine
FROM golang:1.10.8-alpine
RUN apk add -U git make bash coreutils ca-certificates curl

View File

@ -1,4 +1,4 @@
ARG GO_VERSION=1.10.4
ARG GO_VERSION=1.10.8
FROM docker/containerd-shim-process:a4d1531 AS containerd-shim-process
@ -13,6 +13,7 @@ RUN apt-get update && apt-get install -y \
libapparmor-dev \
libseccomp-dev \
iptables \
openssh-client \
&& rm -rf /var/lib/apt/lists/*
ARG COMPOSE_VERSION=1.21.2

View File

@ -1,4 +1,4 @@
FROM golang:1.10.4-alpine
FROM golang:1.10.8-alpine
RUN apk add -U git

View File

@ -19,6 +19,37 @@ The following list of features are deprecated in Engine.
To learn more about Docker Engine's deprecation policy,
see [Feature Deprecation Policy](https://docs.docker.com/engine/#feature-deprecation-policy).
### Legacy "overlay" storage driver
**Deprecated in Release: v18.09.0**
The `overlay` storage driver is deprecated in favor of the `overlay2` storage
driver, which has all the benefits of `overlay`, without its limitations (excessive
inode consumption). The legacy `overlay` storage driver will be removed in a future
release. Users of the `overlay` storage driver should migrate to the `overlay2`
storage driver.
The legacy `overlay` storage driver allowed using OverlayFS-backed filesystems
on pre-4.x kernels. Now that all supported distributions are able to run `overlay2`
(as they are either on kernel 4.x, or have support for multiple lowerdirs
backported), there is no reason to keep maintaining the `overlay` storage driver.
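A minimal sketch of switching a test host to `overlay2` (assuming there is no existing
`/etc/docker/daemon.json`, and that image data created under the old driver can be
discarded, since images are not migrated between storage drivers):
```bash
# Stop the daemon, select overlay2 explicitly, and start it again.
sudo systemctl stop docker
echo '{ "storage-driver": "overlay2" }' | sudo tee /etc/docker/daemon.json
sudo systemctl start docker

# Verify the active storage driver.
docker info --format '{{.Driver}}'   # expected output: overlay2
```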
### device mapper storage driver
**Deprecated in Release: v18.09.0**
The `devicemapper` storage driver is deprecated in favor of `overlay2`, and will
be removed in a future release. Users of the `devicemapper` storage driver should
migrate to a different storage driver, such as `overlay2`, which is now the
default storage driver.
The `devicemapper` storage driver facilitates running Docker on older (3.x) kernels
that have no support for other storage drivers (such as `overlay2` or AUFS).
Now that `overlay2` is supported on all supported distributions (as they are
either on kernel 4.x, or have support for multiple lowerdirs backported), there
is no reason to continue maintaining the `devicemapper` storage driver.
### Reserved namespaces in engine labels
**Deprecated in Release: v18.06.0**
@ -167,7 +198,7 @@ The docker login command is removing the ability to automatically register for a
**Target For Removal In Release: v17.06**
The flag `--security-opt` doesn't use the colon separator(`:`) anymore to divide keys and values, it uses the equal symbol(`=`) for consistency with other similar flags, like `--storage-opt`.
The flag `--security-opt` doesn't use the colon separator (`:`) anymore to divide keys and values, it uses the equal symbol (`=`) for consistency with other similar flags, like `--storage-opt`.
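For example (a minimal sketch using the `seccomp` option):
```bash
# Deprecated, colon-separated form:
#   docker run --security-opt seccomp:unconfined alpine true
# Current form, using '=' as the key/value separator:
docker run --rm --security-opt seccomp=unconfined alpine true
```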
### `/containers/(id or name)/copy` endpoint

View File

@ -121,6 +121,28 @@ registries.
When you're done with your build, you're ready to look into [*Pushing a
repository to its registry*](https://docs.docker.com/engine/tutorials/dockerrepos/#/contributing-to-docker-hub).
## BuildKit
Starting with version 18.09, Docker supports a new backend for executing your
builds that is provided by the [moby/buildkit](https://github.com/moby/buildkit)
project. The BuildKit backend provides many benefits compared to the old
implementation. For example, BuildKit can:
* Detect and skip executing unused build stages
* Parallelize building independent build stages
* Incrementally transfer only the changed files in your build context between builds
* Detect and skip transferring unused files in your build context
* Use external Dockerfile implementations with many new features
* Avoid side effects with the rest of the API (intermediate images and containers)
* Prioritize your build cache for automatic pruning
To use the BuildKit backend, you need to set an environment variable
`DOCKER_BUILDKIT=1` on the CLI before invoking `docker build`.
To learn about the experimental Dockerfile syntax available to BuildKit-based
builds [refer to the documentation in the BuildKit repository](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md).
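As a quick illustration (a minimal sketch, assuming a build context in the current directory):
```bash
# Enable BuildKit for a single build...
DOCKER_BUILDKIT=1 docker build -t myapp .

# ...or export it for the whole shell session.
export DOCKER_BUILDKIT=1
docker build -t myapp .
```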
## Format
Here is the format of the `Dockerfile`:
@ -224,10 +246,64 @@ following lines are all treated identically:
# dIrEcTiVe=value
```
The following parser directive is supported:
The following parser directives are supported:
* `syntax`
* `escape`
## syntax
# syntax=[remote image reference]
For example:
# syntax=docker/dockerfile
# syntax=docker/dockerfile:1.0
# syntax=docker.io/docker/dockerfile:1
# syntax=docker/dockerfile:1.0.0-experimental
# syntax=example.com/user/repo:tag@sha256:abcdef...
This feature is only enabled if the [BuildKit](#buildkit) backend is used.
The syntax directive defines the location of the Dockerfile builder that is used for
building the current Dockerfile. The BuildKit backend allows you to seamlessly use
external builder implementations that are distributed as Docker images and
execute inside a container sandbox environment.
Custom Dockerfile implementations allow you to:
- Automatically get bugfixes without updating the daemon
- Make sure all users are using the same implementation to build your Dockerfile
- Use the latest features without updating the daemon
- Try out new experimental or third-party features
### Official releases
Docker distributes official versions of the images that can be used for building
Dockerfiles under the `docker/dockerfile` repository on Docker Hub. There are two
channels where new images are released: stable and experimental.
The stable channel follows semantic versioning. For example:
- docker/dockerfile:1.0.0 - only allow immutable version 1.0.0
- docker/dockerfile:1.0 - allow versions 1.0.*
- docker/dockerfile:1 - allow versions 1.*.*
- docker/dockerfile:latest - latest release on stable channel
The experimental channel uses incremental versioning with the major and minor
component from the stable channel at the time of the release. For example:
- docker/dockerfile:1.0.1-experimental - only allow immutable version 1.0.1-experimental
- docker/dockerfile:1.0-experimental - latest experimental releases after 1.0
- docker/dockerfile:experimental - latest release on experimental channel
You should choose a channel that best fits your needs. If you only want
bugfixes, you should use `docker/dockerfile:1.0`. If you want to benefit from
experimental features, you should use the experimental channel. If you are using
the experimental channel, newer releases may not be backwards compatible, so it
is recommended to use an immutable full version variant.
For master builds and nightly feature releases refer to the description in [the source repository](https://github.com/moby/buildkit/blob/master/README.md).
## escape
# escape=\ (backslash)
@ -1339,6 +1415,10 @@ The table below shows what command is executed for different `ENTRYPOINT` / `CMD
| **CMD ["p1_cmd", "p2_cmd"]** | p1_cmd p2_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry p1_cmd p2_cmd |
| **CMD exec_cmd p1_cmd** | /bin/sh -c exec_cmd p1_cmd | /bin/sh -c exec_entry p1_entry | exec_entry p1_entry /bin/sh -c exec_cmd p1_cmd |
> **Note**: If `CMD` is defined from the base image, setting `ENTRYPOINT` will
> reset `CMD` to an empty value. In this scenario, `CMD` must be defined in the
> current image to have a value.
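A minimal sketch of this behavior, using hypothetical image names and Dockerfiles read from stdin:
```bash
# Base image defines only CMD.
docker build -t demo-base - <<'EOF'
FROM alpine
CMD ["echo", "hello from CMD"]
EOF

# Child image sets ENTRYPOINT; the CMD inherited from demo-base is reset.
docker build -t demo-child - <<'EOF'
FROM demo-base
ENTRYPOINT ["echo", "entrypoint says:"]
EOF

docker run --rm demo-base    # prints "hello from CMD"
docker run --rm demo-child   # prints only "entrypoint says:"
```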
## VOLUME
VOLUME ["/data"]
@ -1379,7 +1459,7 @@ Keep the following things in mind about volumes in the `Dockerfile`.
data within the volume after it has been declared, those changes will be discarded.
- **JSON formatting**: The list is parsed as a JSON array.
You must enclose words with double quotes (`"`)rather than single quotes (`'`).
You must enclose words with double quotes (`"`) rather than single quotes (`'`).
- **The host directory is declared at container run-time**: The host directory
(the mountpoint) is, by its nature, host-dependent. This is to preserve image
@ -1623,6 +1703,38 @@ RUN echo "Hello World"
When building this Dockerfile, the `HTTP_PROXY` is preserved in the
`docker history`, and changing its value invalidates the build cache.
### Automatic platform ARGs in the global scope
This feature is only available when using the [BuildKit](#buildkit) backend.
Docker predefines a set of `ARG` variables with information on the platform of
the node performing the build (build platform) and on the platform of the
resulting image (target platform). The target platform can be specified with
the `--platform` flag on `docker build`.
The following `ARG` variables are set automatically:
* `TARGETPLATFORM` - platform of the build result, e.g. `linux/amd64`, `linux/arm/v7`, `windows/amd64`.
* `TARGETOS` - OS component of TARGETPLATFORM
* `TARGETARCH` - architecture component of TARGETPLATFORM
* `TARGETVARIANT` - variant component of TARGETPLATFORM
* `BUILDPLATFORM` - platform of the node performing the build.
* `BUILDOS` - OS component of BUILDPLATFORM
* `BUILDARCH` - architecture component of BUILDPLATFORM
* `BUILDVARIANT` - variant component of BUILDPLATFORM
These arguments are defined in the global scope, so they are not automatically
available inside build stages or for your `RUN` commands. To expose one of
these arguments inside a build stage, redefine it without a value.
For example:
```Dockerfile
FROM alpine
ARG TARGETPLATFORM
RUN echo "I'm building for $TARGETPLATFORM"
```
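The target platform for such a build is then selected on the command line, for instance
(a sketch, assuming BuildKit is enabled and the Dockerfile above is in the current directory):
```bash
# TARGETPLATFORM resolves to linux/arm/v7 inside the build.
DOCKER_BUILDKIT=1 docker build --platform=linux/arm/v7 -t hello-arm .
```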
### Impact on build caching
`ARG` variables are not persisted into the built image as `ENV` variables are.
@ -1931,6 +2043,14 @@ required such as `zsh`, `csh`, `tcsh` and others.
The `SHELL` feature was added in Docker 1.12.
## External implementation features
This feature is only available when using the [BuildKit](#buildkit) backend.
Docker build supports experimental features such as cache mounts, build secrets, and
SSH forwarding, which are enabled by using an external implementation of the
builder with a syntax directive. To learn about these features, [refer to the documentation in the BuildKit repository](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md).
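As an illustration, SSH forwarding combines a syntax directive in the Dockerfile with the
`--ssh` flag on `docker build` (a hedged sketch, assuming an `ssh-agent` with a loaded key
is running on the client; the exact `RUN --mount` syntax is described in the BuildKit
documentation linked above):
```bash
cat > Dockerfile <<'EOF'
# syntax=docker/dockerfile:experimental
FROM alpine
RUN apk add --no-cache openssh-client
# The forwarded agent socket is only available during this RUN step.
RUN --mount=type=ssh ssh-add -l
EOF

export DOCKER_BUILDKIT=1
docker build --ssh default .
```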
## Dockerfile examples
Below you can see some examples of Dockerfile syntax. If you're interested in

View File

@ -59,6 +59,7 @@ Options:
Unit is optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes),
or `g` (gigabytes). If you omit the unit, the system uses bytes.
--squash Squash newly built layers into a single new layer (**Experimental Only**)
--ssh SSH agent socket or keys to expose to the build (only if BuildKit enabled) (format: default|<id>[=<socket>|<key>[,<key>]])
-t, --tag value Name and optionally a tag in the 'name:tag' format (default [])
--target string Set the target build stage to build.
--ulimit value Ulimit options (default [])

View File

@ -85,7 +85,7 @@ Options:
--memory-reservation string Memory soft limit
--memory-swap string Swap limit equal to memory plus swap: '-1' to enable unlimited swap
--memory-swappiness int Tune container memory swappiness (0 to 100) (default -1)
--mount value Attach a filesytem mount to the container (default [])
--mount value Attach a filesystem mount to the container (default [])
--name string Assign a name to the container
--network-alias value Add network-scoped alias for the container (default [])
--network string Connect a container to a network (default "default")
@ -256,5 +256,5 @@ docker create --device-cgroup-rule='c 42:* rmw' -name my-container my-image
Then, a user could ask `udev` to execute a script that would `docker exec my-container mknod newDevX c 42 <minor>`
the required device when it is added.
NOTE: initially present devices still need to be explicitely added to
NOTE: initially present devices still need to be explicitly added to
the create/run command

View File

@ -191,7 +191,10 @@ $ docker -H ssh://example.com ps
```
To use an SSH connection, you need to set up `ssh` so that it can reach the
remote host with public key authentication.
remote host with public key authentication. Password authentication is not
supported. If your key is protected with a passphrase, you need to set up
`ssh-agent`.
Also, you need the `docker` binary, version 18.09 or later, on the daemon host.
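A minimal sketch of such a setup (host name and user are placeholders):
```bash
# Load the passphrase-protected key into an agent once...
eval "$(ssh-agent)"
ssh-add ~/.ssh/id_rsa

# ...then point the CLI at the remote daemon over SSH.
docker -H ssh://me@example.com ps

# Or export it for the whole session:
export DOCKER_HOST=ssh://me@example.com
docker ps
```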
#### Bind Docker to another host/port or a Unix socket
@ -1302,8 +1305,13 @@ This is a full example of the allowed configuration options on Linux:
"storage-opts": [],
"labels": [],
"live-restore": true,
"log-driver": "",
"log-opts": {},
"log-driver": "json-file",
"log-opts": {
"max-size": "10m",
"max-files":"5",
"labels": "somelabel",
"env": "os,customer"
},
"mtu": 0,
"pidfile": "",
"cluster-store": "",
@ -1327,7 +1335,13 @@ This is a full example of the allowed configuration options on Linux:
"userns-remap": "",
"group": "",
"cgroup-parent": "",
"default-ulimits": {},
"default-ulimits": {
"nofile": {
"Name": "nofile",
"Hard": 64000,
"Soft": 64000
}
},
"init": false,
"init-path": "/usr/libexec/docker-init",
"ipv6": false,
@ -1424,6 +1438,16 @@ This is a full example of the allowed configuration options on Windows:
}
```
#### Feature options
The optional field `features` in `daemon.json` allows users to enable or disable specific
daemon features. For example, `{"features":{"buildkit": true}}` enables `buildkit` as the
default docker image builder.
The list of currently supported feature options:
- `buildkit`: Enables `buildkit` as the default builder when set to `true`, and disables it
when set to `false`. Note that if this option is not explicitly set in the daemon config file,
it is up to the CLI to determine which builder to invoke.
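A minimal sketch of enabling it daemon-side (assuming a systemd-managed daemon and the
default configuration path):
```bash
# After adding {"features": {"buildkit": true}} to /etc/docker/daemon.json:
sudo systemctl restart docker

# Builds now use BuildKit without DOCKER_BUILDKIT=1 on the client.
docker build -t myapp .
```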
#### Configuration reload behavior
Some options can be reconfigured when the daemon is running without requiring

View File

@ -85,6 +85,8 @@ you can download them from:
- Microsoft Windows Credential Manager: https://github.com/docker/docker-credential-helpers/releases
- [pass](https://www.passwordstore.org/): https://github.com/docker/docker-credential-helpers/releases
#### Configure the credentials store
You need to specify the credentials store in `$HOME/.docker/config.json`
to tell the docker engine to use it. The value of the config property should be
the suffix of the program to use (i.e. everything after `docker-credential-`).
@ -99,7 +101,7 @@ For example, to use `docker-credential-osxkeychain`:
If you are currently logged in, run `docker logout` to remove
the credentials from the file and run `docker login` again.
### Default behavior
#### Default behavior
By default, Docker looks for the native binary on each of the platforms, i.e.
"osxkeychain" on macOS, "wincred" on windows, and "pass" on Linux. A special
@ -108,7 +110,7 @@ it cannot find the "pass" binary. If none of these binaries are present, it
stores the credentials (i.e. password) in base64 encoding in the config files
described above.
### Credential helper protocol
#### Credential helper protocol
Credential helpers can be any program or script that follows a very simple protocol.
This protocol is heavily inspired by Git, but it differs in the information shared.
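For instance, the `get` action reads a server address on stdin and prints a JSON payload with
the stored credentials (a sketch, assuming `docker-credential-osxkeychain` is installed and
holds credentials for Docker Hub):
```bash
echo "https://index.docker.io/v1/" | docker-credential-osxkeychain get
# {"ServerURL":"https://index.docker.io/v1/","Username":"someuser","Secret":"..."}
```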
@ -162,7 +164,7 @@ designated programs to handle credentials for *specific registries*. The default
credential store (`credsStore` or the config file itself) will not be used for
operations concerning credentials of the specified registries.
### Logging out
#### Configure credential helpers
If you are currently logged in, run `docker logout` to remove
the credentials from the default store.
@ -182,3 +184,7 @@ For example:
}
}
```
## Related commands
* [logout](logout.md)

View File

@ -30,3 +30,7 @@ Options:
```bash
$ docker logout localhost:8080
```
## Related commands
* [login](login.md)

View File

@ -46,11 +46,11 @@ $ docker rmi fd484f19954f
Error: Conflict, cannot delete image fd484f19954f because it is tagged in multiple repositories, use -f to force
2013/12/11 05:47:16 Error: failed to remove one or more images
$ docker rmi test1
$ docker rmi test1:latest
Untagged: test1:latest
$ docker rmi test2
$ docker rmi test2:latest
Untagged: test2:latest
@ -60,7 +60,7 @@ $ docker images
REPOSITORY TAG IMAGE ID CREATED SIZE
test latest fd484f19954f 23 seconds ago 7 B (virtual 4.964 MB)
$ docker rmi test
$ docker rmi test:latest
Untagged: test:latest
Deleted: fd484f19954f4920da7ff372b5067f5b7ddb2fd3830cecd17b96ea9e286ba5b8

View File

@ -418,7 +418,7 @@ $ docker run -l my-label --label com.example.foo=bar ubuntu bash
```
The `my-label` key doesn't specify a value so the label defaults to an empty
string(`""`). To add multiple labels, repeat the label flag (`-l` or `--label`).
string (`""`). To add multiple labels, repeat the label flag (`-l` or `--label`).
The `key=value` must be unique to avoid overwriting the label value. If you
specify labels with identical keys but different values, each subsequent value

View File

@ -1085,7 +1085,7 @@ per second from `/dev/sda`:
$ docker run -it --device-read-bps /dev/sda:1mb ubuntu
The `--device-write-bps` flag limits the write rate (bytes per second)to a device.
The `--device-write-bps` flag limits the write rate (bytes per second) to a device.
For example, this command creates a container and limits the write rate to `1mb`
per second for `/dev/sda`:
@ -1555,7 +1555,7 @@ The example below mounts an empty tmpfs into the container with the `rw`,
If neither 'rw' nor 'ro' is specified, the volume is mounted in
read-write mode.
The `nocopy` modes is used to disable automatic copying requested volume
The `nocopy` mode is used to disable automatically copying the requested volume
path in the container to the volume storage location.
For named volumes, `copy` is the default mode. Copy modes are not supported
for bind-mounted volumes.
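For example (a minimal sketch with a hypothetical named volume):
```bash
# Without 'nocopy', content already present at the mount path in the image
# would be copied into the empty named volume; 'nocopy' disables that step.
docker run --rm -v myvol:/data:nocopy alpine ls /data
```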

View File

@ -22,6 +22,7 @@ func generateCliYaml(opts *options) error {
dockerCli := command.NewDockerCli(stdin, stdout, stderr, false, nil)
cmd := &cobra.Command{Use: "docker"}
commands.AddCommands(cmd, dockerCli)
disableFlagsInUseLine(cmd)
source := filepath.Join(opts.source, descriptionSourcePath)
if err := loadLongDescription(cmd, source); err != nil {
return err
@ -31,6 +32,23 @@ func generateCliYaml(opts *options) error {
return GenYamlTree(cmd, opts.target)
}
func disableFlagsInUseLine(cmd *cobra.Command) {
visitAll(cmd, func(ccmd *cobra.Command) {
// do not add a `[flags]` to the end of the usage line.
ccmd.DisableFlagsInUseLine = true
})
}
// visitAll will traverse all commands from the root.
// This is different from the VisitAll of cobra.Command where only parents
// are checked.
func visitAll(root *cobra.Command, fn func(*cobra.Command)) {
for _, cmd := range root.Commands() {
visitAll(cmd, fn)
}
fn(root)
}
func loadLongDescription(cmd *cobra.Command, path ...string) error {
for _, cmd := range cmd.Commands() {
if cmd.Name() == "" {

View File

@ -0,0 +1,9 @@
version: '2.1'
services:
engine:
build:
context: ./testdata
dockerfile: Dockerfile.connhelper-ssh
environment:
- TEST_CONNHELPER_SSH_ID_RSA_PUB

e2e/testdata/Dockerfile.connhelper-ssh vendored Normal file
View File

@ -0,0 +1,14 @@
FROM docker:test-dind
RUN apk --no-cache add shadow openssh-server && \
groupadd -f docker && \
useradd -m penguin && \
usermod -aG docker penguin && \
usermod -p $(head -c32 /dev/urandom | base64) penguin && \
chsh -s /bin/sh penguin && \
ssh-keygen -A
# workaround: ssh session excludes /usr/local/bin from $PATH
RUN ln -s /usr/local/bin/docker /usr/bin/docker
COPY ./connhelper-ssh/entrypoint.sh /
EXPOSE 22
ENTRYPOINT ["/entrypoint.sh"]
# usage: docker run --privileged -e TEST_CONNHELPER_SSH_ID_RSA_PUB=$(cat ~/.ssh/id_rsa.pub) -p 22 $THIS_IMAGE

e2e/testdata/connhelper-ssh/entrypoint.sh vendored Executable file
View File

@ -0,0 +1,8 @@
#!/bin/sh
set -ex
mkdir -m 0700 -p /home/penguin/.ssh
echo ${TEST_CONNHELPER_SSH_ID_RSA_PUB} > /home/penguin/.ssh/authorized_keys
chmod 0600 /home/penguin/.ssh/authorized_keys
chown -R penguin:penguin /home/penguin
/usr/sbin/sshd -E /var/log/sshd.log
exec dockerd-entrypoint.sh $@

View File

@ -179,5 +179,5 @@ func getReleaseNotesURL(imageName string) string {
versionTag = taggedRef.Tag()
}
}
return fmt.Sprintf("%s/%s", clitypes.ReleaseNotePrefix, versionTag)
return fmt.Sprintf("%s?%s", clitypes.ReleaseNotePrefix, versionTag)
}

View File

@ -290,11 +290,11 @@ func TestActivateDoUpdateVerifyImageName(t *testing.T) {
func TestGetReleaseNotesURL(t *testing.T) {
imageName := "bogus image name #$%&@!"
url := getReleaseNotesURL(imageName)
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/")
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"?")
imageName = "foo.bar/valid/repowithouttag"
url = getReleaseNotesURL(imageName)
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/")
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"?")
imageName = "foo.bar/valid/repowithouttag:tag123"
url = getReleaseNotesURL(imageName)
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"/tag123")
assert.Equal(t, url, clitypes.ReleaseNotePrefix+"?tag123")
}

View File

@ -13,7 +13,7 @@ type (
getHubUserOrgsFunc func(ctx context.Context, authToken string) (orgs []model.Org, err error)
getHubUserByNameFunc func(ctx context.Context, username string) (user *model.User, err error)
verifyLicenseFunc func(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error)
generateNewTrialSubscriptionFunc func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error)
generateNewTrialSubscriptionFunc func(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error)
listSubscriptionsFunc func(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error)
listSubscriptionsDetailsFunc func(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error)
downloadLicenseFromHubFunc func(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error)
@ -52,9 +52,9 @@ func (c *fakeLicensingClient) VerifyLicense(ctx context.Context, license model.I
return nil, nil
}
func (c *fakeLicensingClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
func (c *fakeLicensingClient) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
if c.generateNewTrialSubscriptionFunc != nil {
return c.generateNewTrialSubscriptionFunc(ctx, authToken, dockerID, email)
return c.generateNewTrialSubscriptionFunc(ctx, authToken, dockerID)
}
return "", nil
}

View File

@ -134,7 +134,7 @@ func (u HubUser) GetAvailableLicenses(ctx context.Context) ([]LicenseDisplay, er
// GenerateTrialLicense will generate a new trial license for the specified user or org
func (u HubUser) GenerateTrialLicense(ctx context.Context, targetID string) (*model.IssuedLicense, error) {
subID, err := u.Client.GenerateNewTrialSubscription(ctx, u.token, targetID, u.User.Email)
subID, err := u.Client.GenerateNewTrialSubscription(ctx, u.token, targetID)
if err != nil {
return nil, err
}

View File

@ -147,7 +147,7 @@ func TestGenerateTrialFail(t *testing.T) {
ctx := context.Background()
user := HubUser{
Client: &fakeLicensingClient{
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
return "", fmt.Errorf("generate trial failure")
},
},
@ -161,7 +161,7 @@ func TestGenerateTrialHappy(t *testing.T) {
ctx := context.Background()
user := HubUser{
Client: &fakeLicensingClient{
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error) {
generateNewTrialSubscriptionFunc: func(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error) {
return "subid", nil
},
},

View File

@ -1,15 +1,13 @@
package environment
import (
"context"
"os"
"strings"
"testing"
"time"
"github.com/docker/docker/client"
"github.com/pkg/errors"
"gotest.tools/assert"
"gotest.tools/icmd"
"gotest.tools/poll"
"gotest.tools/skip"
)
@ -79,21 +77,14 @@ func boolFromString(val string) bool {
}
}
func dockerClient(t *testing.T) client.APIClient {
t.Helper()
c, err := client.NewClientWithOpts(client.FromEnv, client.WithVersion("1.37"))
assert.NilError(t, err)
return c
}
// DefaultPollSettings used with gotestyourself/poll
var DefaultPollSettings = poll.WithDelay(100 * time.Millisecond)
// SkipIfNotExperimentalDaemon returns whether the test docker daemon is in experimental mode
func SkipIfNotExperimentalDaemon(t *testing.T) {
t.Helper()
c := dockerClient(t)
info, err := c.Info(context.Background())
assert.NilError(t, err)
skip.If(t, !info.ExperimentalBuild, "running against a non-experimental daemon")
result := icmd.RunCmd(icmd.Command("docker", "info", "--format", "{{.ExperimentalBuild}}"))
result.Assert(t, icmd.Expected{Err: icmd.None})
experimentalBuild := strings.TrimSpace(result.Stdout()) == "true"
skip.If(t, !experimentalBuild, "running against a non-experimental daemon")
}

View File

@ -48,7 +48,7 @@ repository with "version1.0.test":
## Tagging an image for a private repository
To push an image to a private registry and not the central Docker
Before pushing an image to a private registry and not the central Docker
registry, you must tag it with the registry hostname and port (if needed).
docker image tag 0e5574283393 myregistryhost:5000/fedora/httpd:version1.0

View File

@ -77,6 +77,8 @@ func parseDockerDaemonHost(addr string) (string, error) {
return parseSimpleProtoAddr("npipe", addrParts[1], DefaultNamedPipe)
case "fd":
return addr, nil
case "ssh":
return addr, nil
default:
return "", fmt.Errorf("Invalid bind address format: %s", addr)
}

View File

@ -17,7 +17,15 @@ function setup {
local project=$1
local file=$2
test "${DOCKERD_EXPERIMENTAL:-}" -eq "1" && file="${file}:./e2e/compose-env.experimental.yaml"
test "${DOCKERD_EXPERIMENTAL:-0}" -eq "1" && file="${file}:./e2e/compose-env.experimental.yaml"
if [[ "${TEST_CONNHELPER:-}" = "ssh" ]];then
test ! -f "${HOME}/.ssh/id_rsa" && ssh-keygen -t rsa -C docker-e2e-dummy -N "" -f "${HOME}/.ssh/id_rsa" -q
grep "^StrictHostKeyChecking no" "${HOME}/.ssh/config" > /dev/null 2>&1 || echo "StrictHostKeyChecking no" > "${HOME}/.ssh/config"
TEST_CONNHELPER_SSH_ID_RSA_PUB=$(cat "${HOME}/.ssh/id_rsa.pub")
export TEST_CONNHELPER_SSH_ID_RSA_PUB
file="${file}:./e2e/compose-env.connhelper-ssh.yaml"
fi
COMPOSE_PROJECT_NAME=$project COMPOSE_FILE=$file docker-compose up --build -d >&2
local network="${project}_default"
@ -26,6 +34,9 @@ function setup {
engine_ip="$(container_ip "${project}_engine_1" "$network")"
engine_host="tcp://$engine_ip:2375"
if [[ "${TEST_CONNHELPER:-}" = "ssh" ]];then
engine_host="ssh://penguin@${engine_ip}"
fi
(
export DOCKER_HOST="$engine_host"
timeout 200 ./scripts/test/e2e/wait-on-daemon
@ -57,7 +68,7 @@ function runtests {
TEST_REMOTE_DAEMON="${REMOTE_DAEMON-}" \
TEST_SKIP_PLUGIN_TESTS="${SKIP_PLUGIN_TESTS-}" \
GOPATH="$GOPATH" \
PATH="$PWD/build/" \
PATH="$PWD/build/:/usr/bin" \
"$(which go)" test -v ./e2e/... ${TESTFLAGS-}
}

View File

@ -19,7 +19,7 @@ const (
RegistryPrefix = "docker.io/store/docker"
// ReleaseNotePrefix is where to point users to for release notes
ReleaseNotePrefix = "https://docs.docker.com/releasenotes"
ReleaseNotePrefix = "https://docker.com/engine/releasenotes"
// RuntimeMetadataName is the name of the runtime metadata file
// When stored as a label on the container it is prefixed by "com.docker."

View File

@ -22,7 +22,7 @@ github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/docker/go-metrics d466d4f6fd960e01820085bd7e1a24426ee7ef18
github.com/docker/go-units 47565b4f722fb6ceae66b95f853feed578a4a51c # v0.3.3
github.com/docker/libtrust 9cbd2a1374f46905c68a4eb3694a130610adc62a
github.com/docker/licensing f2eae57157a06681b024f1690923d03e414179a0
github.com/docker/licensing 1c117a1720cb413dd6a101d36a6c567b1ccb90fe
github.com/docker/swarmkit cfa742c8abe6f8e922f6e4e920153c408e7d9c3b
github.com/flynn-archive/go-shlex 3f9db97f856818214da2e1057f8ad84803971cff
github.com/ghodss/yaml 0ca9ea5df5451ffdf184b4428c902747c2c11cd7 # v1.0.0
@ -51,7 +51,7 @@ github.com/Microsoft/hcsshim 44c060121b68e8bdc40b411beba551f3b4ee9e55
github.com/Microsoft/go-winio v0.4.10
github.com/miekg/pkcs11 287d9350987cc9334667882061e202e96cdfb4d0
github.com/mitchellh/mapstructure f15292f7a699fcc1a38a80977f80a046874ba8ac
github.com/moby/buildkit 6812dac65e0440bb75affce1fb2175e640edc15d
github.com/moby/buildkit 520201006c9dc676da9cf9655337ac711f7f127d
github.com/modern-go/concurrent bacd9c7ef1dd9b15be4a9909b8ac7a4e313eec94 # 1.0.3
github.com/modern-go/reflect2 4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd # 1.0.1
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
@ -76,12 +76,12 @@ github.com/spf13/cobra v0.0.3
github.com/spf13/pflag 4cb166e4f25ac4e8016a3595bbf7ea2e9aa85a2c https://github.com/thaJeztah/pflag.git
github.com/syndtr/gocapability 2c00daeb6c3b45114c80ac44119e7b8801fdd852
github.com/theupdateframework/notary v0.6.1
github.com/tonistiigi/fsutil b19464cd1b6a00773b4f2eb7acf9c30426f9df42
github.com/tonistiigi/fsutil f567071bed2416e4d87d260d3162722651182317
github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
github.com/xeipuuv/gojsonpointer 4e3ac2762d5f479393488629ee9370b50873b3a6
github.com/xeipuuv/gojsonreference bd5ef7bd5415a7ac448318e64f11a24cd21e594b
github.com/xeipuuv/gojsonschema 93e72a773fade158921402d6a24c819b48aba29d
golang.org/x/crypto a2144134853fc9a27a7b1e3eb4f19f1a76df13c9
golang.org/x/crypto 0709b304e793a5edb4a2c0145f281ecdc20838a4
golang.org/x/net a680a1efc54dd51c040b3b5ce4939ea3cf2ea0d1
golang.org/x/sync 1d60e4601c6fd243af51cc01ddf169918a5407ca
golang.org/x/sys 1b2967e3c290b7c545b3db0deeda16e9be4f98a2

View File

@ -1,32 +0,0 @@
package licensing
import (
"context"
"github.com/docker/licensing/lib/go-clientlib"
"github.com/docker/licensing/model"
)
func (c *client) createAccount(ctx context.Context, dockerID string, request *model.AccountCreationRequest) (*model.Account, error) {
url := c.baseURI
url.Path += "/api/billing/v4/accounts/" + dockerID
response := new(model.Account)
if _, _, err := c.doReq(ctx, "PUT", &url, clientlib.SendJSON(request), clientlib.RecvJSON(response)); err != nil {
return nil, err
}
return response, nil
}
func (c *client) getAccount(ctx context.Context, dockerID string) (*model.Account, error) {
url := c.baseURI
url.Path += "/api/billing/v4/accounts/" + dockerID
response := new(model.Account)
if _, _, err := c.doReq(ctx, "GET", &url, clientlib.RecvJSON(response)); err != nil {
return nil, err
}
return response, nil
}

View File

@ -28,7 +28,7 @@ type Client interface {
GetHubUserOrgs(ctx context.Context, authToken string) (orgs []model.Org, err error)
GetHubUserByName(ctx context.Context, username string) (user *model.User, err error)
VerifyLicense(ctx context.Context, license model.IssuedLicense) (res *model.CheckResponse, err error)
GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID, email string) (subscriptionID string, err error)
GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (subscriptionID string, err error)
ListSubscriptions(ctx context.Context, authToken, dockerID string) (response []*model.Subscription, err error)
ListSubscriptionsDetails(ctx context.Context, authToken, dockerID string) (response []*model.SubscriptionDetail, err error)
DownloadLicenseFromHub(ctx context.Context, authToken, subscriptionID string) (license *model.IssuedLicense, err error)
@ -80,31 +80,9 @@ func (c *client) VerifyLicense(ctx context.Context, license model.IssuedLicense)
return res, nil
}
func (c *client) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID, email string) (string, error) {
func (c *client) GenerateNewTrialSubscription(ctx context.Context, authToken, dockerID string) (string, error) {
ctx = jwt.NewContext(ctx, authToken)
if _, err := c.getAccount(ctx, dockerID); err != nil {
code, ok := errors.HTTPStatus(err)
// create billing account if one is not found
if ok && code == http.StatusNotFound {
_, err = c.createAccount(ctx, dockerID, &model.AccountCreationRequest{
Profile: model.Profile{
Email: email,
},
})
if err != nil {
return "", errors.Wrap(err, errors.Fields{
"dockerID": dockerID,
"email": email,
})
}
} else {
return "", errors.Wrap(err, errors.Fields{
"dockerID": dockerID,
})
}
}
sub, err := c.createSubscription(ctx, &model.SubscriptionCreationRequest{
Name: "Docker Enterprise Free Trial",
DockerID: dockerID,
@ -116,8 +94,7 @@ func (c *client) GenerateNewTrialSubscription(ctx context.Context, authToken, do
})
if err != nil {
return "", errors.Wrap(err, errors.Fields{
"dockerID": dockerID,
"email": email,
"docker_id": dockerID,
})
}
@ -131,7 +108,7 @@ func (c *client) ListSubscriptions(ctx context.Context, authToken, dockerID stri
subs, err := c.listSubscriptions(ctx, map[string]string{"docker_id": dockerID})
if err != nil {
return nil, errors.Wrap(err, errors.Fields{
"dockerID": dockerID,
"docker_id": dockerID,
})
}
@ -155,7 +132,7 @@ func (c *client) ListSubscriptionsDetails(ctx context.Context, authToken, docker
subs, err := c.listSubscriptionsDetails(ctx, map[string]string{"docker_id": dockerID})
if err != nil {
return nil, errors.Wrap(err, errors.Fields{
"dockerID": dockerID,
"docker_id": dockerID,
})
}

View File

@ -1,61 +0,0 @@
package model
import (
"github.com/docker/licensing/lib/go-validation"
)
// Profile represents an Account profile
type Profile struct {
Email string `json:"email"`
FirstName string `json:"first_name"`
LastName string `json:"last_name"`
Addresses []*Address `json:"addresses,omitempty"`
CompanyName string `json:"company_name,omitempty"`
PhonePrimary string `json:"phone_primary,omitempty"`
JobFunction string `json:"job_function,omitempty"`
VatID string `json:"vat_id,omitempty"`
}
// Address represents a Profile address
type Address struct {
AddressLine1 string `json:"address_line_1,omitempty"`
AddressLine2 string `json:"address_line_2,omitempty"`
AddressLine3 string `json:"address_line_3,omitempty"`
City string `json:"city,omitempty"`
Province string `json:"province,omitempty"`
Country string `json:"country,omitempty"`
Postcode string `json:"post_code,omitempty"`
PrimaryAddress bool `json:"primary_address,omitempty"`
}
// Account represents a billing profile
type Account struct {
DockerID string `json:"docker_id"`
Profile Profile `json:"profile"`
}
// AccountCreationRequest represents an Account creation request
type AccountCreationRequest struct {
Profile Profile `json:"profile"`
}
// Validate returns true if the account request is valid, false otherwise.
// If invalid, one or more validation Errors will be returned.
func (a *AccountCreationRequest) Validate() (bool, validation.Errors) {
profile := a.Profile
var errs validation.Errors
if validation.IsEmpty(profile.Email) {
errs = append(errs, validation.InvalidEmpty("email"))
}
if !validation.IsEmpty(profile.Email) && !validation.IsEmail(profile.Email) {
errs = append(errs, validation.InvalidEmail("email", profile.Email))
}
valid := len(errs) == 0
return valid, errs
}

View File

@ -21,10 +21,6 @@ type User struct {
Company string `json:"company,omitempty"`
ProfileURL string `json:"profile_url,omitempty"`
GravatarURL string `json:"gravatar_url,omitempty"`
Email string `json:"email"`
Primary bool `json:"primary"`
Verified bool `json:"verified"`
}
// Org details a Docker organization

View File

@ -9,6 +9,7 @@
It has these top-level messages:
WorkerRecord
GCPolicy
*/
package moby_buildkit_v1_types
@ -35,6 +36,7 @@ type WorkerRecord struct {
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
Labels map[string]string `protobuf:"bytes,2,rep,name=Labels" json:"Labels,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"`
Platforms []pb.Platform `protobuf:"bytes,3,rep,name=platforms" json:"platforms"`
GCPolicy []*GCPolicy `protobuf:"bytes,4,rep,name=GCPolicy" json:"GCPolicy,omitempty"`
}
func (m *WorkerRecord) Reset() { *m = WorkerRecord{} }
@ -63,8 +65,56 @@ func (m *WorkerRecord) GetPlatforms() []pb.Platform {
return nil
}
func (m *WorkerRecord) GetGCPolicy() []*GCPolicy {
if m != nil {
return m.GCPolicy
}
return nil
}
type GCPolicy struct {
All bool `protobuf:"varint,1,opt,name=all,proto3" json:"all,omitempty"`
KeepDuration int64 `protobuf:"varint,2,opt,name=keepDuration,proto3" json:"keepDuration,omitempty"`
KeepBytes int64 `protobuf:"varint,3,opt,name=keepBytes,proto3" json:"keepBytes,omitempty"`
Filters []string `protobuf:"bytes,4,rep,name=filters" json:"filters,omitempty"`
}
func (m *GCPolicy) Reset() { *m = GCPolicy{} }
func (m *GCPolicy) String() string { return proto.CompactTextString(m) }
func (*GCPolicy) ProtoMessage() {}
func (*GCPolicy) Descriptor() ([]byte, []int) { return fileDescriptorWorker, []int{1} }
func (m *GCPolicy) GetAll() bool {
if m != nil {
return m.All
}
return false
}
func (m *GCPolicy) GetKeepDuration() int64 {
if m != nil {
return m.KeepDuration
}
return 0
}
func (m *GCPolicy) GetKeepBytes() int64 {
if m != nil {
return m.KeepBytes
}
return 0
}
func (m *GCPolicy) GetFilters() []string {
if m != nil {
return m.Filters
}
return nil
}
func init() {
proto.RegisterType((*WorkerRecord)(nil), "moby.buildkit.v1.types.WorkerRecord")
proto.RegisterType((*GCPolicy)(nil), "moby.buildkit.v1.types.GCPolicy")
}
func (m *WorkerRecord) Marshal() (dAtA []byte, err error) {
size := m.Size()
@ -116,6 +166,71 @@ func (m *WorkerRecord) MarshalTo(dAtA []byte) (int, error) {
i += n
}
}
if len(m.GCPolicy) > 0 {
for _, msg := range m.GCPolicy {
dAtA[i] = 0x22
i++
i = encodeVarintWorker(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *GCPolicy) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *GCPolicy) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.All {
dAtA[i] = 0x8
i++
if m.All {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
if m.KeepDuration != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintWorker(dAtA, i, uint64(m.KeepDuration))
}
if m.KeepBytes != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintWorker(dAtA, i, uint64(m.KeepBytes))
}
if len(m.Filters) > 0 {
for _, s := range m.Filters {
dAtA[i] = 0x22
i++
l = len(s)
for l >= 1<<7 {
dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
l >>= 7
i++
}
dAtA[i] = uint8(l)
i++
i += copy(dAtA[i:], s)
}
}
return i, nil
}
@ -149,6 +264,33 @@ func (m *WorkerRecord) Size() (n int) {
n += 1 + l + sovWorker(uint64(l))
}
}
if len(m.GCPolicy) > 0 {
for _, e := range m.GCPolicy {
l = e.Size()
n += 1 + l + sovWorker(uint64(l))
}
}
return n
}
func (m *GCPolicy) Size() (n int) {
var l int
_ = l
if m.All {
n += 2
}
if m.KeepDuration != 0 {
n += 1 + sovWorker(uint64(m.KeepDuration))
}
if m.KeepBytes != 0 {
n += 1 + sovWorker(uint64(m.KeepBytes))
}
if len(m.Filters) > 0 {
for _, s := range m.Filters {
l = len(s)
n += 1 + l + sovWorker(uint64(l))
}
}
return n
}
@ -372,6 +514,174 @@ func (m *WorkerRecord) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field GCPolicy", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWorker
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthWorker
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.GCPolicy = append(m.GCPolicy, &GCPolicy{})
if err := m.GCPolicy[len(m.GCPolicy)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipWorker(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthWorker
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *GCPolicy) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWorker
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: GCPolicy: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: GCPolicy: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field All", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWorker
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.All = bool(v != 0)
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field KeepDuration", wireType)
}
m.KeepDuration = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWorker
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.KeepDuration |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field KeepBytes", wireType)
}
m.KeepBytes = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWorker
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.KeepBytes |= (int64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Filters", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowWorker
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthWorker
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Filters = append(m.Filters, string(dAtA[iNdEx:postIndex]))
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipWorker(dAtA[iNdEx:])
@ -501,23 +811,28 @@ var (
func init() { proto.RegisterFile("worker.proto", fileDescriptorWorker) }
var fileDescriptorWorker = []byte{
// 273 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x8f, 0x41, 0x4b, 0xf3, 0x40,
0x10, 0x86, 0xbf, 0x4d, 0x3e, 0x0b, 0xdd, 0x06, 0x91, 0x45, 0x24, 0xe4, 0x10, 0x8b, 0xa7, 0x1e,
0x74, 0xb6, 0xea, 0x45, 0x3d, 0x96, 0x0a, 0x16, 0x3c, 0x48, 0x2e, 0x9e, 0xb3, 0xed, 0x36, 0x86,
0x24, 0xce, 0xb2, 0xd9, 0x44, 0xf2, 0x0f, 0x7b, 0xf4, 0xe2, 0x55, 0x24, 0xbf, 0x44, 0xba, 0x89,
0x98, 0x83, 0xb7, 0x79, 0x87, 0x67, 0x1e, 0xde, 0xa1, 0xde, 0x1b, 0xea, 0x4c, 0x6a, 0x50, 0x1a,
0x0d, 0xb2, 0x93, 0x02, 0x45, 0x03, 0xa2, 0x4a, 0xf3, 0x4d, 0x96, 0x1a, 0xa8, 0x2f, 0xc1, 0x34,
0x4a, 0x96, 0xc1, 0x45, 0x92, 0x9a, 0x97, 0x4a, 0xc0, 0x1a, 0x0b, 0x9e, 0x60, 0x82, 0xdc, 0xe2,
0xa2, 0xda, 0xda, 0x64, 0x83, 0x9d, 0x3a, 0x4d, 0x70, 0x3e, 0xc0, 0xf7, 0x46, 0xfe, 0x63, 0xe4,
0x25, 0xe6, 0xb5, 0xd4, 0x5c, 0x09, 0x8e, 0xaa, 0xec, 0xe8, 0xb3, 0x0f, 0x42, 0xbd, 0x67, 0xdb,
0x22, 0x92, 0x6b, 0xd4, 0x1b, 0x76, 0x48, 0x9d, 0xd5, 0xd2, 0x27, 0x53, 0x32, 0x1b, 0x47, 0xce,
0x6a, 0xc9, 0x1e, 0xe8, 0xe8, 0x31, 0x16, 0x32, 0x2f, 0x7d, 0x67, 0xea, 0xce, 0x26, 0x57, 0x73,
0xf8, 0xbb, 0x26, 0x0c, 0x2d, 0xd0, 0x9d, 0xdc, 0xbf, 0x1a, 0xdd, 0x44, 0xfd, 0x3d, 0x9b, 0xd3,
0xb1, 0xca, 0x63, 0xb3, 0x45, 0x5d, 0x94, 0xbe, 0x6b, 0x65, 0x1e, 0x28, 0x01, 0x4f, 0xfd, 0x72,
0xf1, 0x7f, 0xf7, 0x79, 0xfa, 0x2f, 0xfa, 0x85, 0x82, 0x5b, 0x3a, 0x19, 0x88, 0xd8, 0x11, 0x75,
0x33, 0xd9, 0xf4, 0xdd, 0xf6, 0x23, 0x3b, 0xa6, 0x07, 0x75, 0x9c, 0x57, 0xd2, 0x77, 0xec, 0xae,
0x0b, 0x77, 0xce, 0x0d, 0x59, 0x78, 0xbb, 0x36, 0x24, 0xef, 0x6d, 0x48, 0xbe, 0xda, 0x90, 0x88,
0x91, 0x7d, 0xf6, 0xfa, 0x3b, 0x00, 0x00, 0xff, 0xff, 0xa9, 0x5c, 0x8f, 0x26, 0x71, 0x01, 0x00,
0x00,
// 355 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x91, 0xc1, 0x4e, 0xea, 0x40,
0x14, 0x86, 0x6f, 0x5b, 0x2e, 0x97, 0x0e, 0xcd, 0x8d, 0x99, 0x18, 0xd3, 0x10, 0x83, 0x84, 0x15,
0x0b, 0x9d, 0xa2, 0x6e, 0xd4, 0xb8, 0x42, 0x8c, 0x92, 0xb8, 0x20, 0xb3, 0x71, 0xdd, 0x81, 0x01,
0x9b, 0x0e, 0x9c, 0xc9, 0x74, 0x8a, 0xf6, 0x39, 0x7c, 0x29, 0x96, 0x3e, 0x81, 0x31, 0x3c, 0x89,
0x99, 0x29, 0x08, 0x26, 0xba, 0x3b, 0xff, 0x9f, 0xff, 0xfb, 0xe7, 0x9c, 0x0c, 0x0a, 0x9e, 0x41,
0xa5, 0x5c, 0x11, 0xa9, 0x40, 0x03, 0x3e, 0x98, 0x01, 0x2b, 0x08, 0xcb, 0x13, 0x31, 0x4e, 0x13,
0x4d, 0x16, 0xa7, 0x44, 0x17, 0x92, 0x67, 0x8d, 0x93, 0x69, 0xa2, 0x9f, 0x72, 0x46, 0x46, 0x30,
0x8b, 0xa6, 0x30, 0x85, 0xc8, 0xc6, 0x59, 0x3e, 0xb1, 0xca, 0x0a, 0x3b, 0x95, 0x35, 0x8d, 0xe3,
0x9d, 0xb8, 0x69, 0x8c, 0x36, 0x8d, 0x51, 0x06, 0x62, 0xc1, 0x55, 0x24, 0x59, 0x04, 0x32, 0x2b,
0xd3, 0xed, 0x57, 0x17, 0x05, 0x8f, 0x76, 0x0b, 0xca, 0x47, 0xa0, 0xc6, 0xf8, 0x3f, 0x72, 0x07,
0xfd, 0xd0, 0x69, 0x39, 0x1d, 0x9f, 0xba, 0x83, 0x3e, 0xbe, 0x47, 0xd5, 0x87, 0x98, 0x71, 0x91,
0x85, 0x6e, 0xcb, 0xeb, 0xd4, 0xcf, 0xba, 0xe4, 0xe7, 0x35, 0xc9, 0x6e, 0x0b, 0x29, 0x91, 0xdb,
0xb9, 0x56, 0x05, 0x5d, 0xf3, 0xb8, 0x8b, 0x7c, 0x29, 0x62, 0x3d, 0x01, 0x35, 0xcb, 0x42, 0xcf,
0x96, 0x05, 0x44, 0x32, 0x32, 0x5c, 0x9b, 0xbd, 0xca, 0xf2, 0xfd, 0xe8, 0x0f, 0xdd, 0x86, 0xf0,
0x35, 0xaa, 0xdd, 0xdd, 0x0c, 0x41, 0x24, 0xa3, 0x22, 0xac, 0x58, 0xa0, 0xf5, 0xdb, 0xeb, 0x9b,
0x1c, 0xfd, 0x22, 0x1a, 0x97, 0xa8, 0xbe, 0xb3, 0x06, 0xde, 0x43, 0x5e, 0xca, 0x8b, 0xf5, 0x65,
0x66, 0xc4, 0xfb, 0xe8, 0xef, 0x22, 0x16, 0x39, 0x0f, 0x5d, 0xeb, 0x95, 0xe2, 0xca, 0xbd, 0x70,
0xda, 0x2f, 0xdb, 0x87, 0x0d, 0x17, 0x0b, 0x61, 0xb9, 0x1a, 0x35, 0x23, 0x6e, 0xa3, 0x20, 0xe5,
0x5c, 0xf6, 0x73, 0x15, 0xeb, 0x04, 0xe6, 0x16, 0xf7, 0xe8, 0x37, 0x0f, 0x1f, 0x22, 0xdf, 0xe8,
0x5e, 0xa1, 0xb9, 0x39, 0xd6, 0x04, 0xb6, 0x06, 0x0e, 0xd1, 0xbf, 0x49, 0x22, 0x34, 0x57, 0x99,
0xbd, 0xcb, 0xa7, 0x1b, 0xd9, 0x0b, 0x96, 0xab, 0xa6, 0xf3, 0xb6, 0x6a, 0x3a, 0x1f, 0xab, 0xa6,
0xc3, 0xaa, 0xf6, 0x93, 0xce, 0x3f, 0x03, 0x00, 0x00, 0xff, 0xff, 0xfc, 0x79, 0x52, 0x6a, 0x29,
0x02, 0x00, 0x00,
}

View File

@ -13,4 +13,12 @@ message WorkerRecord {
string ID = 1;
map<string, string> Labels = 2;
repeated pb.Platform platforms = 3 [(gogoproto.nullable) = false];
repeated GCPolicy GCPolicy = 4;
}
message GCPolicy {
bool all = 1;
int64 keepDuration = 2;
int64 keepBytes = 3;
repeated string filters = 4;
}

View File

@ -84,6 +84,16 @@ func (g *gatewayClientForBuild) ReadFile(ctx context.Context, in *gatewayapi.Rea
return g.gateway.ReadFile(ctx, in, opts...)
}
func (g *gatewayClientForBuild) ReadDir(ctx context.Context, in *gatewayapi.ReadDirRequest, opts ...grpc.CallOption) (*gatewayapi.ReadDirResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.ReadDir(ctx, in, opts...)
}
func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.StatFileRequest, opts ...grpc.CallOption) (*gatewayapi.StatFileResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.StatFile(ctx, in, opts...)
}
func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Ping(ctx, in, opts...)

View File

@ -25,12 +25,11 @@ type ClientOpt interface{}
func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error) {
gopts := []grpc.DialOption{
grpc.WithDialer(dialer),
grpc.FailOnNonTempDialError(true),
}
needWithInsecure := true
for _, o := range opts {
if _, ok := o.(*withBlockOpt); ok {
gopts = append(gopts, grpc.WithBlock(), grpc.FailOnNonTempDialError(true))
if _, ok := o.(*withFailFast); ok {
gopts = append(gopts, grpc.FailOnNonTempDialError(true))
}
if credInfo, ok := o.(*withCredentials); ok {
opt, err := loadCredentials(credInfo)
@ -52,7 +51,6 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error
if address == "" {
address = appdefaults.Address
}
conn, err := grpc.DialContext(ctx, address, gopts...)
if err != nil {
return nil, errors.Wrapf(err, "failed to dial %q . make sure buildkitd is running", address)
@ -71,10 +69,10 @@ func (c *Client) Close() error {
return c.conn.Close()
}
type withBlockOpt struct{}
type withFailFast struct{}
func WithBlock() ClientOpt {
return &withBlockOpt{}
func WithFailFast() ClientOpt {
return &withFailFast{}
}
type withCredentials struct {

View File

@ -2,10 +2,12 @@ package llb
import (
_ "crypto/sha256"
"fmt"
"net"
"sort"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/system"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
@ -61,6 +63,7 @@ type ExecOp struct {
constraints Constraints
isValidated bool
secrets []SecretInfo
ssh []SSHInfo
}
func (e *ExecOp) AddMount(target string, source Output, opt ...MountOption) Output {
@ -130,6 +133,24 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
return e.mounts[i].target < e.mounts[j].target
})
if len(e.ssh) > 0 {
for i, s := range e.ssh {
if s.Target == "" {
e.ssh[i].Target = fmt.Sprintf("/run/buildkit/ssh_agent.%d", i)
}
}
if _, ok := e.meta.Env.Get("SSH_AUTH_SOCK"); !ok {
e.meta.Env = e.meta.Env.AddOrReplace("SSH_AUTH_SOCK", e.ssh[0].Target)
}
}
if c.Caps != nil {
if err := c.Caps.Supports(pb.CapExecMetaSetsDefaultPath); err != nil {
e.meta.Env = e.meta.Env.SetDefault("PATH", system.DefaultPathEnv)
} else {
addCap(&e.constraints, pb.CapExecMetaSetsDefaultPath)
}
}
meta := &pb.Meta{
Args: e.meta.Args,
Env: e.meta.Env.ToArray(),
@ -178,6 +199,14 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
}
}
if len(e.secrets) > 0 {
addCap(&e.constraints, pb.CapExecMountSecret)
}
if len(e.ssh) > 0 {
addCap(&e.constraints, pb.CapExecMountSSH)
}
pop, md := MarshalConstraints(c, &e.constraints)
pop.Op = &pb.Op_Exec{
Exec: peo,
@ -245,10 +274,6 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
peo.Mounts = append(peo.Mounts, pm)
}
if len(e.secrets) > 0 {
addCap(&e.constraints, pb.CapMountSecret)
}
for _, s := range e.secrets {
pm := &pb.Mount{
Dest: s.Target,
@ -264,6 +289,21 @@ func (e *ExecOp) Marshal(c *Constraints) (digest.Digest, []byte, *pb.OpMetadata,
peo.Mounts = append(peo.Mounts, pm)
}
for _, s := range e.ssh {
pm := &pb.Mount{
Dest: s.Target,
MountType: pb.MountType_SSH,
SSHOpt: &pb.SSHOpt{
ID: s.ID,
Uid: uint32(s.UID),
Gid: uint32(s.GID),
Mode: uint32(s.Mode),
Optional: s.Optional,
},
}
peo.Mounts = append(peo.Mounts, pm)
}
dt, err := pop.Marshal()
if err != nil {
return "", nil, nil, err
@ -432,6 +472,62 @@ func AddMount(dest string, mountState State, opts ...MountOption) RunOption {
})
}
func AddSSHSocket(opts ...SSHOption) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
s := &SSHInfo{
Mode: 0600,
}
for _, opt := range opts {
opt.SetSSHOption(s)
}
ei.SSH = append(ei.SSH, *s)
})
}
type SSHOption interface {
SetSSHOption(*SSHInfo)
}
type sshOptionFunc func(*SSHInfo)
func (fn sshOptionFunc) SetSSHOption(si *SSHInfo) {
fn(si)
}
func SSHID(id string) SSHOption {
return sshOptionFunc(func(si *SSHInfo) {
si.ID = id
})
}
func SSHSocketTarget(target string) SSHOption {
return sshOptionFunc(func(si *SSHInfo) {
si.Target = target
})
}
func SSHSocketOpt(target string, uid, gid, mode int) SSHOption {
return sshOptionFunc(func(si *SSHInfo) {
si.Target = target
si.UID = uid
si.GID = gid
si.Mode = mode
})
}
var SSHOptional = sshOptionFunc(func(si *SSHInfo) {
si.Optional = true
})
type SSHInfo struct {
ID string
Target string
Mode int
UID int
GID int
Optional bool
}
func AddSecret(dest string, opts ...SecretOption) RunOption {
return runOptionFunc(func(ei *ExecInfo) {
s := &SecretInfo{ID: dest, Target: dest, Mode: 0400}
@ -498,6 +594,7 @@ type ExecInfo struct {
ReadonlyRootFS bool
ProxyEnv *ProxyEnv
Secrets []SecretInfo
SSH []SSHInfo
}
type MountInfo struct {

View File

@ -23,10 +23,6 @@ var (
keyNetwork = contextKeyT("llb.network")
)
func addEnv(key, value string) StateOption {
return addEnvf(key, value)
}
func addEnvf(key, value string, v ...interface{}) StateOption {
return func(s State) State {
return s.WithValue(keyEnv, getEnv(s).AddOrReplace(key, fmt.Sprintf(value, v...)))
@ -175,6 +171,13 @@ func (e EnvList) AddOrReplace(k, v string) EnvList {
return e
}
func (e EnvList) SetDefault(k, v string) EnvList {
if _, ok := e.Get(k); !ok {
e = append(e, KeyValue{key: k, value: v})
}
return e
}
func (e EnvList) Delete(k string) EnvList {
e = append([]KeyValue(nil), e...)
if i, ok := e.Index(k); ok {

View File

@ -7,12 +7,14 @@ import (
digest "github.com/opencontainers/go-digest"
)
// WithMetaResolver adds a metadata resolver to an image
func WithMetaResolver(mr ImageMetaResolver) ImageOption {
return imageOptionFunc(func(ii *ImageInfo) {
ii.metaResolver = mr
})
}
// ImageMetaResolver can resolve image config metadata from a reference
type ImageMetaResolver interface {
ResolveImageConfig(ctx context.Context, ref string, opt gw.ResolveImageConfigOpt) (digest.Digest, []byte, error)
}

View File

@ -9,7 +9,6 @@ import (
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/system"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
)
@ -34,7 +33,6 @@ func NewState(o Output) State {
ctx: context.Background(),
}
s = dir("/")(s)
s = addEnv("PATH", system.DefaultPathEnv)(s)
s = s.ensurePlatform()
return s
}
@ -67,7 +65,7 @@ func (s State) Value(k interface{}) interface{} {
return s.ctx.Value(k)
}
func (s State) SetMarhalDefaults(co ...ConstraintsOpt) State {
func (s State) SetMarshalDefaults(co ...ConstraintsOpt) State {
s.opts = co
return s
}
@ -196,6 +194,7 @@ func (s State) Run(ro ...RunOption) ExecState {
exec.AddMount(m.Target, m.Source, m.Opts...)
}
exec.secrets = ei.Secrets
exec.ssh = ei.SSH
return ExecState{
State: s.WithOutput(exec.Output()),
@ -410,6 +409,13 @@ func WithoutDefaultExportCache() ConstraintsOpt {
})
}
// WithCaps exposes supported LLB caps to the marshaler
func WithCaps(caps apicaps.CapSet) ConstraintsOpt {
return constraintsOptFunc(func(c *Constraints) {
c.Caps = &caps
})
}
type constraintsWrapper struct {
Constraints
}
@ -423,6 +429,7 @@ type Constraints struct {
WorkerConstraints []string
Metadata pb.OpMetadata
LocalUniqueID string
Caps *apicaps.CapSet
}
func Platform(p specs.Platform) ConstraintsOpt {

View File

@ -19,7 +19,7 @@ import (
opentracing "github.com/opentracing/opentracing-go"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/tonistiigi/fsutil"
fstypes "github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
)
@ -256,7 +256,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
return nil, errors.Errorf("%s not a directory", d)
}
}
resetUIDAndGID := func(st *fsutil.Stat) bool {
resetUIDAndGID := func(st *fstypes.Stat) bool {
st.Uid = 0
st.Gid = 0
return true

View File

@ -2,19 +2,24 @@ package client
import (
"context"
"time"
controlapi "github.com/moby/buildkit/api/services/control"
apitypes "github.com/moby/buildkit/api/types"
"github.com/moby/buildkit/solver/pb"
specs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// WorkerInfo contains information about a worker
type WorkerInfo struct {
ID string
Labels map[string]string
Platforms []specs.Platform
GCPolicy []PruneInfo
}
// ListWorkers lists all active workers
func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]*WorkerInfo, error) {
info := &ListWorkersInfo{}
for _, o := range opts {
@ -34,16 +39,32 @@ func (c *Client) ListWorkers(ctx context.Context, opts ...ListWorkersOption) ([]
ID: w.ID,
Labels: w.Labels,
Platforms: pb.ToSpecPlatforms(w.Platforms),
GCPolicy: fromAPIGCPolicy(w.GCPolicy),
})
}
return wi, nil
}
// ListWorkersOption is an option for a worker list query
type ListWorkersOption interface {
SetListWorkersOption(*ListWorkersInfo)
}
// ListWorkersInfo is a payload for worker list query
type ListWorkersInfo struct {
Filter []string
}
func fromAPIGCPolicy(in []*apitypes.GCPolicy) []PruneInfo {
out := make([]PruneInfo, 0, len(in))
for _, p := range in {
out = append(out, PruneInfo{
All: p.All,
Filter: p.Filters,
KeepDuration: time.Duration(p.KeepDuration),
KeepBytes: p.KeepBytes,
})
}
return out
}
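A short usage sketch of the extended worker listing (client setup omitted; assumes this package is imported as client):

    // assumes: import ("context"; "fmt"; "github.com/moby/buildkit/client")
    func printWorkers(ctx context.Context, c *client.Client) error {
        workers, err := c.ListWorkers(ctx)
        if err != nil {
            return err
        }
        for _, w := range workers {
            // GCPolicy is new here and carries the worker's garbage-collection rules
            fmt.Printf("%s platforms=%v gc-rules=%d\n", w.ID, w.Platforms, len(w.GCPolicy))
        }
        return nil
    }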

View File

@ -4,8 +4,10 @@ import (
"context"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go/v1"
fstypes "github.com/tonistiigi/fsutil/types"
)
type Client interface {
@ -16,8 +18,8 @@ type Client interface {
type Reference interface {
ReadFile(ctx context.Context, req ReadRequest) ([]byte, error)
// StatFile(ctx context.Context, req StatRequest) (*StatResponse, error)
// ReadDir(ctx context.Context, req ReadDirRequest) ([]*StatResponse, error)
StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error)
ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error)
}
type ReadRequest struct {
@ -30,6 +32,15 @@ type FileRange struct {
Length int
}
type ReadDirRequest struct {
Path string
IncludePattern string
}
type StatRequest struct {
Path string
}
// SolveRequest is the same as frontend.SolveRequest but avoids the dependency
type SolveRequest struct {
Definition *pb.Definition
@ -49,6 +60,8 @@ type BuildOpts struct {
SessionID string
Workers []WorkerInfo
Product string
LLBCaps apicaps.CapSet
Caps apicaps.CapSet
}
type ResolveImageConfigOpt struct {

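Illustrative only: a frontend that holds a Reference from a successful solve can now inspect the result's filesystem directly instead of calling ReadFile on known paths. A sketch, assuming this package is imported as gwclient:

    // assumes: import ("context"; "fmt"; gwclient "github.com/moby/buildkit/frontend/gateway/client")
    func listConfFiles(ctx context.Context, ref gwclient.Reference) error {
        entries, err := ref.ReadDir(ctx, gwclient.ReadDirRequest{Path: "/etc", IncludePattern: "*.conf"})
        if err != nil {
            return err
        }
        for _, st := range entries {
            fmt.Println(st.Path)
        }
        // StatFile returns the fstypes.Stat for a single path
        _, err = ref.StatFile(ctx, gwclient.StatRequest{Path: "/etc/os-release"})
        return err
    }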
View File

@ -16,6 +16,7 @@ import (
"github.com/moby/buildkit/util/apicaps"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
fstypes "github.com/tonistiigi/fsutil/types"
"google.golang.org/grpc"
"google.golang.org/grpc/status"
)
@ -207,7 +208,7 @@ func defaultLLBCaps() []apicaps.PBCap {
{ID: string(opspb.CapExecMountCacheSharing), Enabled: true},
{ID: string(opspb.CapExecMountSelector), Enabled: true},
{ID: string(opspb.CapExecMountTmpfs), Enabled: true},
{ID: string(opspb.CapMountSecret), Enabled: true},
{ID: string(opspb.CapExecMountSecret), Enabled: true},
{ID: string(opspb.CapConstraints), Enabled: true},
{ID: string(opspb.CapPlatform), Enabled: true},
{ID: string(opspb.CapMetaIgnoreCache), Enabled: true},
@ -329,21 +330,11 @@ func (c *grpcClient) BuildOpts() client.BuildOpts {
SessionID: c.sessionID,
Workers: c.workers,
Product: c.product,
LLBCaps: c.llbCaps,
Caps: c.caps,
}
}
func (c *grpcClient) Opts() map[string]string {
return c.opts
}
func (c *grpcClient) SessionID() string {
return c.sessionID
}
func (c *grpcClient) WorkerInfos() []client.WorkerInfo {
return c.workers
}
type reference struct {
id string
c *grpcClient
@ -364,6 +355,31 @@ func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byt
return resp.Data, nil
}
func (r *reference) ReadDir(ctx context.Context, req client.ReadDirRequest) ([]*fstypes.Stat, error) {
rdr := &pb.ReadDirRequest{
DirPath: req.Path,
IncludePattern: req.IncludePattern,
Ref: r.id,
}
resp, err := r.c.client.ReadDir(ctx, rdr)
if err != nil {
return nil, err
}
return resp.Entries, nil
}
func (r *reference) StatFile(ctx context.Context, req client.StatRequest) (*fstypes.Stat, error) {
rdr := &pb.StatFileRequest{
Path: req.Path,
Ref: r.id,
}
resp, err := r.c.client.StatFile(ctx, rdr)
if err != nil {
return nil, err
}
return resp.Stat, nil
}
func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, error) {
dialOpt := grpc.WithDialer(func(addr string, d time.Duration) (net.Conn, error) {
return stdioConn(), nil

View File

@ -16,6 +16,8 @@ const (
CapReadFile apicaps.CapID = "readfile"
CapReturnResult apicaps.CapID = "return"
CapReturnMap apicaps.CapID = "returnmap"
CapReadDir apicaps.CapID = "readdir"
CapStatFile apicaps.CapID = "statfile"
)
func init() {
@ -69,4 +71,17 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapReadDir,
Name: "read static directory",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapStatFile,
Name: "stat a file",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}
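Since older daemons do not implement ReadDir or StatFile, frontends are expected to check these capabilities before calling the new methods; a sketch, assuming the gateway client interface exposes BuildOpts and this package is imported as gwpb:

    // assumes: import (gwclient "github.com/moby/buildkit/frontend/gateway/client"; gwpb "github.com/moby/buildkit/frontend/gateway/pb")
    func supportsReadDir(c gwclient.Client) bool {
        opts := c.BuildOpts()
        // Supports returns an error when the capability was not advertised by the daemon
        return opts.Caps.Supports(gwpb.CapReadDir) == nil
    }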

View File

@ -19,6 +19,10 @@
ReadFileRequest
FileRange
ReadFileResponse
ReadDirRequest
ReadDirResponse
StatFileRequest
StatFileResponse
PingRequest
PongResponse
*/
@ -32,6 +36,7 @@ import google_rpc "github.com/gogo/googleapis/google/rpc"
import pb "github.com/moby/buildkit/solver/pb"
import moby_buildkit_v1_types "github.com/moby/buildkit/api/types"
import moby_buildkit_v1_apicaps "github.com/moby/buildkit/util/apicaps/pb"
import fsutil_types "github.com/tonistiigi/fsutil/types"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
@ -448,13 +453,101 @@ func (m *ReadFileResponse) GetData() []byte {
return nil
}
type ReadDirRequest struct {
Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
DirPath string `protobuf:"bytes,2,opt,name=DirPath,proto3" json:"DirPath,omitempty"`
IncludePattern string `protobuf:"bytes,3,opt,name=IncludePattern,proto3" json:"IncludePattern,omitempty"`
}
func (m *ReadDirRequest) Reset() { *m = ReadDirRequest{} }
func (m *ReadDirRequest) String() string { return proto.CompactTextString(m) }
func (*ReadDirRequest) ProtoMessage() {}
func (*ReadDirRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{11} }
func (m *ReadDirRequest) GetRef() string {
if m != nil {
return m.Ref
}
return ""
}
func (m *ReadDirRequest) GetDirPath() string {
if m != nil {
return m.DirPath
}
return ""
}
func (m *ReadDirRequest) GetIncludePattern() string {
if m != nil {
return m.IncludePattern
}
return ""
}
type ReadDirResponse struct {
Entries []*fsutil_types.Stat `protobuf:"bytes,1,rep,name=entries" json:"entries,omitempty"`
}
func (m *ReadDirResponse) Reset() { *m = ReadDirResponse{} }
func (m *ReadDirResponse) String() string { return proto.CompactTextString(m) }
func (*ReadDirResponse) ProtoMessage() {}
func (*ReadDirResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{12} }
func (m *ReadDirResponse) GetEntries() []*fsutil_types.Stat {
if m != nil {
return m.Entries
}
return nil
}
type StatFileRequest struct {
Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"`
Path string `protobuf:"bytes,2,opt,name=Path,proto3" json:"Path,omitempty"`
}
func (m *StatFileRequest) Reset() { *m = StatFileRequest{} }
func (m *StatFileRequest) String() string { return proto.CompactTextString(m) }
func (*StatFileRequest) ProtoMessage() {}
func (*StatFileRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{13} }
func (m *StatFileRequest) GetRef() string {
if m != nil {
return m.Ref
}
return ""
}
func (m *StatFileRequest) GetPath() string {
if m != nil {
return m.Path
}
return ""
}
type StatFileResponse struct {
Stat *fsutil_types.Stat `protobuf:"bytes,1,opt,name=stat" json:"stat,omitempty"`
}
func (m *StatFileResponse) Reset() { *m = StatFileResponse{} }
func (m *StatFileResponse) String() string { return proto.CompactTextString(m) }
func (*StatFileResponse) ProtoMessage() {}
func (*StatFileResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{14} }
func (m *StatFileResponse) GetStat() *fsutil_types.Stat {
if m != nil {
return m.Stat
}
return nil
}
type PingRequest struct {
}
func (m *PingRequest) Reset() { *m = PingRequest{} }
func (m *PingRequest) String() string { return proto.CompactTextString(m) }
func (*PingRequest) ProtoMessage() {}
func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{11} }
func (*PingRequest) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{15} }
type PongResponse struct {
FrontendAPICaps []moby_buildkit_v1_apicaps.APICap `protobuf:"bytes,1,rep,name=FrontendAPICaps" json:"FrontendAPICaps"`
@ -465,7 +558,7 @@ type PongResponse struct {
func (m *PongResponse) Reset() { *m = PongResponse{} }
func (m *PongResponse) String() string { return proto.CompactTextString(m) }
func (*PongResponse) ProtoMessage() {}
func (*PongResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{12} }
func (*PongResponse) Descriptor() ([]byte, []int) { return fileDescriptorGateway, []int{16} }
func (m *PongResponse) GetFrontendAPICaps() []moby_buildkit_v1_apicaps.APICap {
if m != nil {
@ -500,6 +593,10 @@ func init() {
proto.RegisterType((*ReadFileRequest)(nil), "moby.buildkit.v1.frontend.ReadFileRequest")
proto.RegisterType((*FileRange)(nil), "moby.buildkit.v1.frontend.FileRange")
proto.RegisterType((*ReadFileResponse)(nil), "moby.buildkit.v1.frontend.ReadFileResponse")
proto.RegisterType((*ReadDirRequest)(nil), "moby.buildkit.v1.frontend.ReadDirRequest")
proto.RegisterType((*ReadDirResponse)(nil), "moby.buildkit.v1.frontend.ReadDirResponse")
proto.RegisterType((*StatFileRequest)(nil), "moby.buildkit.v1.frontend.StatFileRequest")
proto.RegisterType((*StatFileResponse)(nil), "moby.buildkit.v1.frontend.StatFileResponse")
proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest")
proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse")
}
@ -521,6 +618,10 @@ type LLBBridgeClient interface {
Solve(ctx context.Context, in *SolveRequest, opts ...grpc.CallOption) (*SolveResponse, error)
// apicaps:CapReadFile
ReadFile(ctx context.Context, in *ReadFileRequest, opts ...grpc.CallOption) (*ReadFileResponse, error)
// apicaps:CapReadDir
ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error)
// apicaps:CapStatFile
StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error)
Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error)
Return(ctx context.Context, in *ReturnRequest, opts ...grpc.CallOption) (*ReturnResponse, error)
}
@ -560,6 +661,24 @@ func (c *lLBBridgeClient) ReadFile(ctx context.Context, in *ReadFileRequest, opt
return out, nil
}
func (c *lLBBridgeClient) ReadDir(ctx context.Context, in *ReadDirRequest, opts ...grpc.CallOption) (*ReadDirResponse, error) {
out := new(ReadDirResponse)
err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/ReadDir", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *lLBBridgeClient) StatFile(ctx context.Context, in *StatFileRequest, opts ...grpc.CallOption) (*StatFileResponse, error) {
out := new(StatFileResponse)
err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/StatFile", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *lLBBridgeClient) Ping(ctx context.Context, in *PingRequest, opts ...grpc.CallOption) (*PongResponse, error) {
out := new(PongResponse)
err := grpc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Ping", in, out, c.cc, opts...)
@ -587,6 +706,10 @@ type LLBBridgeServer interface {
Solve(context.Context, *SolveRequest) (*SolveResponse, error)
// apicaps:CapReadFile
ReadFile(context.Context, *ReadFileRequest) (*ReadFileResponse, error)
// apicaps:CapReadDir
ReadDir(context.Context, *ReadDirRequest) (*ReadDirResponse, error)
// apicaps:CapStatFile
StatFile(context.Context, *StatFileRequest) (*StatFileResponse, error)
Ping(context.Context, *PingRequest) (*PongResponse, error)
Return(context.Context, *ReturnRequest) (*ReturnResponse, error)
}
@ -649,6 +772,42 @@ func _LLBBridge_ReadFile_Handler(srv interface{}, ctx context.Context, dec func(
return interceptor(ctx, in, info, handler)
}
func _LLBBridge_ReadDir_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(ReadDirRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LLBBridgeServer).ReadDir(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/ReadDir",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LLBBridgeServer).ReadDir(ctx, req.(*ReadDirRequest))
}
return interceptor(ctx, in, info, handler)
}
func _LLBBridge_StatFile_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(StatFileRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(LLBBridgeServer).StatFile(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/StatFile",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(LLBBridgeServer).StatFile(ctx, req.(*StatFileRequest))
}
return interceptor(ctx, in, info, handler)
}
func _LLBBridge_Ping_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(PingRequest)
if err := dec(in); err != nil {
@ -701,6 +860,14 @@ var _LLBBridge_serviceDesc = grpc.ServiceDesc{
MethodName: "ReadFile",
Handler: _LLBBridge_ReadFile_Handler,
},
{
MethodName: "ReadDir",
Handler: _LLBBridge_ReadDir_Handler,
},
{
MethodName: "StatFile",
Handler: _LLBBridge_StatFile_Handler,
},
{
MethodName: "Ping",
Handler: _LLBBridge_Ping_Handler,
@ -1169,6 +1336,130 @@ func (m *ReadFileResponse) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
func (m *ReadDirRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReadDirRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Ref) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref)))
i += copy(dAtA[i:], m.Ref)
}
if len(m.DirPath) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintGateway(dAtA, i, uint64(len(m.DirPath)))
i += copy(dAtA[i:], m.DirPath)
}
if len(m.IncludePattern) > 0 {
dAtA[i] = 0x1a
i++
i = encodeVarintGateway(dAtA, i, uint64(len(m.IncludePattern)))
i += copy(dAtA[i:], m.IncludePattern)
}
return i, nil
}
func (m *ReadDirResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *ReadDirResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Entries) > 0 {
for _, msg := range m.Entries {
dAtA[i] = 0xa
i++
i = encodeVarintGateway(dAtA, i, uint64(msg.Size()))
n, err := msg.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n
}
}
return i, nil
}
func (m *StatFileRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *StatFileRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Ref) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintGateway(dAtA, i, uint64(len(m.Ref)))
i += copy(dAtA[i:], m.Ref)
}
if len(m.Path) > 0 {
dAtA[i] = 0x12
i++
i = encodeVarintGateway(dAtA, i, uint64(len(m.Path)))
i += copy(dAtA[i:], m.Path)
}
return i, nil
}
func (m *StatFileResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *StatFileResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if m.Stat != nil {
dAtA[i] = 0xa
i++
i = encodeVarintGateway(dAtA, i, uint64(m.Stat.Size()))
n9, err := m.Stat.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n9
}
return i, nil
}
func (m *PingRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -1449,6 +1740,60 @@ func (m *ReadFileResponse) Size() (n int) {
return n
}
func (m *ReadDirRequest) Size() (n int) {
var l int
_ = l
l = len(m.Ref)
if l > 0 {
n += 1 + l + sovGateway(uint64(l))
}
l = len(m.DirPath)
if l > 0 {
n += 1 + l + sovGateway(uint64(l))
}
l = len(m.IncludePattern)
if l > 0 {
n += 1 + l + sovGateway(uint64(l))
}
return n
}
func (m *ReadDirResponse) Size() (n int) {
var l int
_ = l
if len(m.Entries) > 0 {
for _, e := range m.Entries {
l = e.Size()
n += 1 + l + sovGateway(uint64(l))
}
}
return n
}
func (m *StatFileRequest) Size() (n int) {
var l int
_ = l
l = len(m.Ref)
if l > 0 {
n += 1 + l + sovGateway(uint64(l))
}
l = len(m.Path)
if l > 0 {
n += 1 + l + sovGateway(uint64(l))
}
return n
}
func (m *StatFileResponse) Size() (n int) {
var l int
_ = l
if m.Stat != nil {
l = m.Stat.Size()
n += 1 + l + sovGateway(uint64(l))
}
return n
}
func (m *PingRequest) Size() (n int) {
var l int
_ = l
@ -3088,6 +3433,415 @@ func (m *ReadFileResponse) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *ReadDirRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ReadDirRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ReadDirRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGateway
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ref = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field DirPath", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGateway
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.DirPath = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field IncludePattern", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGateway
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.IncludePattern = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGateway(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGateway
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *ReadDirResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: ReadDirResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: ReadDirResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Entries", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGateway
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Entries = append(m.Entries, &fsutil_types.Stat{})
if err := m.Entries[len(m.Entries)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGateway(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGateway
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *StatFileRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: StatFileRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: StatFileRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Ref", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGateway
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Ref = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Path", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGateway
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Path = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGateway(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGateway
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *StatFileResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: StatFileResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: StatFileResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Stat", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGateway
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthGateway
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.Stat == nil {
m.Stat = &fsutil_types.Stat{}
}
if err := m.Stat.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGateway(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGateway
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *PingRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@ -3389,68 +4143,77 @@ var (
func init() { proto.RegisterFile("gateway.proto", fileDescriptorGateway) }
var fileDescriptorGateway = []byte{
// 999 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcf, 0x6f, 0xdb, 0x36,
0x14, 0x8e, 0x22, 0xdb, 0xb1, 0x9f, 0xed, 0xc6, 0x23, 0x86, 0x41, 0xd5, 0x21, 0xf5, 0x84, 0xa1,
0xd3, 0xfa, 0x43, 0xc2, 0xdc, 0x0d, 0xed, 0x5a, 0xa0, 0x5b, 0x9d, 0x34, 0x68, 0x36, 0x67, 0x35,
0xd8, 0x43, 0x81, 0x62, 0x3b, 0xd0, 0x36, 0xa5, 0x08, 0x91, 0x45, 0x8d, 0xa2, 0x93, 0x19, 0xbb,
0x6c, 0x3b, 0xf5, 0xbe, 0x7f, 0xaa, 0xc7, 0x9d, 0x77, 0x08, 0x86, 0xdc, 0xf6, 0x5f, 0x0c, 0xa4,
0x28, 0x47, 0xf9, 0xe5, 0x24, 0x27, 0xf3, 0x51, 0xef, 0x7b, 0xef, 0xe3, 0x7b, 0xdf, 0x23, 0x0d,
0xed, 0x90, 0x08, 0x7a, 0x48, 0xe6, 0x5e, 0xca, 0x99, 0x60, 0xe8, 0xf6, 0x94, 0x8d, 0xe6, 0xde,
0x68, 0x16, 0xc5, 0x93, 0xfd, 0x48, 0x78, 0x07, 0x5f, 0x7a, 0x01, 0x67, 0x89, 0xa0, 0xc9, 0xc4,
0x7e, 0x18, 0x46, 0x62, 0x6f, 0x36, 0xf2, 0xc6, 0x6c, 0xea, 0x87, 0x2c, 0x64, 0xbe, 0x42, 0x8c,
0x66, 0x81, 0xb2, 0x94, 0xa1, 0x56, 0x79, 0x24, 0xbb, 0x77, 0xd6, 0x3d, 0x64, 0x2c, 0x8c, 0x29,
0x49, 0xa3, 0x4c, 0x2f, 0x7d, 0x9e, 0x8e, 0xfd, 0x4c, 0x10, 0x31, 0xcb, 0x34, 0xe6, 0x41, 0x09,
0x23, 0x89, 0xf8, 0x05, 0x11, 0x3f, 0x63, 0xf1, 0x01, 0xe5, 0x7e, 0x3a, 0xf2, 0x59, 0x5a, 0x78,
0xfb, 0x97, 0x7a, 0x93, 0x34, 0xf2, 0xc5, 0x3c, 0xa5, 0x99, 0x7f, 0xc8, 0xf8, 0x3e, 0xe5, 0x1a,
0xf0, 0xe8, 0x52, 0xc0, 0x4c, 0x44, 0xb1, 0x44, 0x8d, 0x49, 0x9a, 0xc9, 0x24, 0xf2, 0x37, 0x07,
0x39, 0xff, 0x19, 0x50, 0xc3, 0x34, 0x9b, 0xc5, 0x02, 0x21, 0x30, 0x39, 0x0d, 0x2c, 0xa3, 0x6b,
0xb8, 0x8d, 0x57, 0x2b, 0x58, 0x1a, 0xe8, 0x31, 0x54, 0x38, 0x0d, 0x32, 0x6b, 0xb5, 0x6b, 0xb8,
0xcd, 0xde, 0xa7, 0xde, 0xa5, 0xf5, 0xf3, 0x30, 0x0d, 0x76, 0x49, 0xfa, 0x6a, 0x05, 0x2b, 0x00,
0xfa, 0x01, 0xea, 0x53, 0x2a, 0xc8, 0x84, 0x08, 0x62, 0x41, 0xd7, 0x74, 0x9b, 0x3d, 0x7f, 0x29,
0x58, 0x32, 0xf0, 0x76, 0x35, 0xe2, 0x65, 0x22, 0xf8, 0x1c, 0x2f, 0x02, 0xd8, 0xcf, 0xa0, 0x7d,
0xea, 0x13, 0xea, 0x80, 0xb9, 0x4f, 0xe7, 0x39, 0x55, 0x2c, 0x97, 0xe8, 0x63, 0xa8, 0x1e, 0x90,
0x78, 0x46, 0x15, 0xd3, 0x16, 0xce, 0x8d, 0xa7, 0xab, 0x4f, 0x8c, 0x7e, 0x1d, 0x6a, 0x5c, 0x85,
0x77, 0xfe, 0x54, 0x67, 0x95, 0x34, 0xd1, 0xb7, 0xfa, 0x5c, 0x86, 0xa2, 0x76, 0xff, 0xca, 0x73,
0xc9, 0x9f, 0x2c, 0xa7, 0xa5, 0x80, 0xf6, 0x63, 0x68, 0x2c, 0xb6, 0xae, 0xa2, 0xd3, 0x28, 0xd1,
0x71, 0x04, 0xb4, 0x31, 0x15, 0x33, 0x9e, 0x60, 0xfa, 0xcb, 0x8c, 0x66, 0x02, 0x7d, 0x53, 0xf0,
0x53, 0xf8, 0xab, 0x8a, 0x2c, 0x1d, 0xb1, 0x06, 0x20, 0x17, 0xaa, 0x94, 0x73, 0xc6, 0x75, 0x7b,
0x90, 0x97, 0x2b, 0xcf, 0xe3, 0xe9, 0xd8, 0x7b, 0xa3, 0x94, 0x87, 0x73, 0x07, 0xa7, 0x03, 0xb7,
0x8a, 0xac, 0x59, 0xca, 0x92, 0x8c, 0x3a, 0x7f, 0x19, 0x70, 0x1b, 0x53, 0x25, 0xbc, 0x9d, 0x29,
0x09, 0xe9, 0x26, 0x4b, 0x82, 0x28, 0x2c, 0x48, 0x75, 0xc0, 0xc4, 0x85, 0x16, 0xb0, 0x5c, 0x22,
0x17, 0xea, 0xc3, 0x98, 0x88, 0x80, 0xf1, 0xa9, 0x4e, 0xd7, 0xf2, 0xd2, 0x91, 0x57, 0xec, 0xe1,
0xc5, 0x57, 0xd4, 0x85, 0xa6, 0x0e, 0xbc, 0xcb, 0x26, 0xd4, 0x32, 0x55, 0x8c, 0xf2, 0x16, 0xb2,
0x60, 0x6d, 0xc0, 0xc2, 0x1f, 0xc9, 0x94, 0x5a, 0x15, 0xf5, 0xb5, 0x30, 0x9d, 0xdf, 0x0d, 0xb0,
0x2f, 0x62, 0x95, 0x93, 0x46, 0xdf, 0x43, 0x6d, 0x2b, 0x0a, 0x69, 0x96, 0xd7, 0xaa, 0xd1, 0xef,
0x7d, 0x38, 0xba, 0xb3, 0xf2, 0xcf, 0xd1, 0x9d, 0x7b, 0x25, 0xe9, 0xb3, 0x94, 0x26, 0x63, 0x96,
0x08, 0x12, 0x25, 0x94, 0xcb, 0x61, 0x7c, 0x38, 0x51, 0x10, 0x2f, 0x47, 0x62, 0x1d, 0x01, 0x7d,
0x02, 0xb5, 0x3c, 0xba, 0x96, 0x8c, 0xb6, 0x9c, 0xf7, 0x26, 0xb4, 0xde, 0x48, 0x02, 0x45, 0x2d,
0x3c, 0x80, 0x2d, 0x1a, 0x44, 0x49, 0x24, 0x22, 0x96, 0xe8, 0x26, 0xdd, 0x92, 0x67, 0x3f, 0xd9,
0xc5, 0x25, 0x0f, 0x64, 0x43, 0x7d, 0x5b, 0x37, 0x4c, 0xb7, 0x7f, 0x61, 0xa3, 0x77, 0xd0, 0x2c,
0xd6, 0xaf, 0x53, 0x61, 0x99, 0x4a, 0x7e, 0x4f, 0x96, 0x74, 0xbc, 0xcc, 0xc4, 0x2b, 0x41, 0x73,
0x2d, 0x96, 0x83, 0x21, 0x17, 0xd6, 0x77, 0xa6, 0x29, 0xe3, 0x62, 0x93, 0x8c, 0xf7, 0xa8, 0x54,
0xa7, 0x55, 0xe9, 0x9a, 0x6e, 0x03, 0x9f, 0xdd, 0x46, 0x0f, 0xe0, 0x23, 0x12, 0xc7, 0xec, 0x50,
0xcb, 0x49, 0x09, 0xc3, 0xaa, 0x76, 0x0d, 0xb7, 0x8e, 0xcf, 0x7f, 0x90, 0x5a, 0xde, 0x8e, 0x12,
0x12, 0x5b, 0xa0, 0x3c, 0x72, 0x03, 0x39, 0xd0, 0x7a, 0xf9, 0xab, 0x0c, 0x4b, 0xf9, 0x0b, 0x21,
0xb8, 0xd5, 0x54, 0x45, 0x3c, 0xb5, 0x67, 0x3f, 0x87, 0xce, 0x59, 0xca, 0x37, 0x9a, 0x95, 0x9f,
0xa0, 0xad, 0xcf, 0xaf, 0xfb, 0xdf, 0x29, 0x5d, 0x51, 0xf9, 0x05, 0x75, 0x32, 0x3d, 0xe6, 0x0d,
0xa7, 0xc7, 0xf9, 0x0d, 0xd6, 0x31, 0x25, 0x93, 0xed, 0x28, 0xa6, 0x97, 0xcb, 0x5e, 0x36, 0x33,
0x8a, 0xe9, 0x90, 0x88, 0xbd, 0x45, 0x33, 0xb5, 0x8d, 0x9e, 0x42, 0x15, 0x93, 0x24, 0xa4, 0x3a,
0xf5, 0x67, 0x4b, 0x52, 0xab, 0x24, 0xd2, 0x17, 0xe7, 0x10, 0xe7, 0x19, 0x34, 0x16, 0x7b, 0x52,
0x8a, 0xaf, 0x83, 0x20, 0xa3, 0xb9, 0xac, 0x4d, 0xac, 0x2d, 0xb9, 0x3f, 0xa0, 0x49, 0xa8, 0x53,
0x9b, 0x58, 0x5b, 0xce, 0x5d, 0xe8, 0x9c, 0x30, 0xd7, 0xa5, 0x41, 0x50, 0xd9, 0x92, 0x97, 0xad,
0xa1, 0xfa, 0xa0, 0xd6, 0x4e, 0x1b, 0x9a, 0xc3, 0x28, 0x29, 0x86, 0xda, 0x39, 0x36, 0xa0, 0x35,
0x64, 0xc9, 0xc9, 0x38, 0x0d, 0x61, 0xbd, 0xe8, 0xcf, 0x8b, 0xe1, 0xce, 0x26, 0x49, 0x8b, 0x0b,
0xb1, 0x7b, 0xfe, 0x28, 0xfa, 0xf9, 0xf0, 0x72, 0xc7, 0x7e, 0x45, 0x4e, 0x1e, 0x3e, 0x0b, 0x47,
0xdf, 0xc1, 0xda, 0x60, 0xd0, 0x57, 0x91, 0x56, 0x6f, 0x14, 0xa9, 0x80, 0xa1, 0xe7, 0xb0, 0xf6,
0x56, 0xbd, 0x6a, 0x99, 0x9e, 0x8e, 0x0b, 0xca, 0xaa, 0x1e, 0x3f, 0x2f, 0x77, 0xc3, 0x74, 0xcc,
0xf8, 0x04, 0x17, 0xa0, 0xde, 0xfb, 0x0a, 0x34, 0x06, 0x83, 0x7e, 0x9f, 0x47, 0x93, 0x90, 0xa2,
0x3f, 0x0c, 0x40, 0xe7, 0xef, 0x13, 0xf4, 0xd5, 0x72, 0x95, 0x5c, 0x7c, 0x29, 0xda, 0x5f, 0xdf,
0x10, 0xa5, 0xab, 0xfc, 0x0e, 0xaa, 0x4a, 0xc5, 0xe8, 0xf3, 0x6b, 0xce, 0xb9, 0xed, 0x5e, 0xed,
0xa8, 0x63, 0x8f, 0xa1, 0x5e, 0x28, 0x01, 0xdd, 0x5b, 0x4a, 0xef, 0x94, 0xd0, 0xed, 0xfb, 0xd7,
0xf2, 0xd5, 0x49, 0xde, 0x42, 0x45, 0xca, 0x08, 0xdd, 0x5d, 0x02, 0x2a, 0xe9, 0xcc, 0x5e, 0x76,
0xce, 0x53, 0xfa, 0xfb, 0x59, 0xbe, 0xc7, 0xea, 0x8e, 0x71, 0x97, 0xf2, 0x29, 0x3d, 0x97, 0xf6,
0x17, 0xd7, 0xf0, 0xcc, 0xc3, 0xf7, 0x5b, 0x1f, 0x8e, 0x37, 0x8c, 0xbf, 0x8f, 0x37, 0x8c, 0x7f,
0x8f, 0x37, 0x8c, 0x51, 0x4d, 0xfd, 0xe1, 0x79, 0xf4, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0f,
0xfd, 0x24, 0x08, 0x13, 0x0a, 0x00, 0x00,
// 1144 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xcf, 0x4f, 0x1b, 0xc7,
0x17, 0x67, 0xb1, 0x8d, 0xed, 0x67, 0x03, 0xfe, 0x8e, 0xbe, 0xaa, 0x36, 0x7b, 0x20, 0xee, 0xaa,
0xa2, 0x0e, 0x21, 0xbb, 0x2a, 0x69, 0x45, 0x4a, 0xa4, 0xa4, 0x31, 0x04, 0x85, 0xd6, 0x34, 0xd6,
0xe4, 0x10, 0x29, 0x6a, 0xa5, 0xae, 0xed, 0xf1, 0x32, 0x62, 0xbd, 0xb3, 0x9d, 0x1d, 0x43, 0x51,
0x2f, 0x6d, 0x4f, 0xbd, 0xf7, 0x9f, 0xca, 0xad, 0x3d, 0xf7, 0x10, 0x55, 0xdc, 0xfa, 0x5f, 0x54,
0xf3, 0x63, 0xed, 0xc5, 0x80, 0x81, 0xd3, 0xce, 0x9b, 0x79, 0x9f, 0xf7, 0x3e, 0x6f, 0xde, 0x8f,
0x59, 0x58, 0x0e, 0x03, 0x41, 0x4e, 0x83, 0x33, 0x2f, 0xe1, 0x4c, 0x30, 0x74, 0x6f, 0xc4, 0x7a,
0x67, 0x5e, 0x6f, 0x4c, 0xa3, 0xc1, 0x31, 0x15, 0xde, 0xc9, 0x67, 0xde, 0x90, 0xb3, 0x58, 0x90,
0x78, 0xe0, 0x3c, 0x0a, 0xa9, 0x38, 0x1a, 0xf7, 0xbc, 0x3e, 0x1b, 0xf9, 0x21, 0x0b, 0x99, 0xaf,
0x10, 0xbd, 0xf1, 0x50, 0x49, 0x4a, 0x50, 0x2b, 0x6d, 0xc9, 0xd9, 0x9a, 0x55, 0x0f, 0x19, 0x0b,
0x23, 0x12, 0x24, 0x34, 0x35, 0x4b, 0x9f, 0x27, 0x7d, 0x3f, 0x15, 0x81, 0x18, 0xa7, 0x06, 0xb3,
0x99, 0xc3, 0x48, 0x22, 0x7e, 0x46, 0xc4, 0x4f, 0x59, 0x74, 0x42, 0xb8, 0x9f, 0xf4, 0x7c, 0x96,
0x64, 0xda, 0xfe, 0xb5, 0xda, 0x41, 0x42, 0x7d, 0x71, 0x96, 0x90, 0xd4, 0x3f, 0x65, 0xfc, 0x98,
0x70, 0x03, 0x78, 0x7c, 0x2d, 0x60, 0x2c, 0x68, 0x24, 0x51, 0xfd, 0x20, 0x49, 0xa5, 0x13, 0xf9,
0x35, 0xa0, 0x7c, 0xd8, 0x82, 0xc5, 0x34, 0x15, 0x94, 0x86, 0xd4, 0x1f, 0xa6, 0x0a, 0xa3, 0xbd,
0xc8, 0x20, 0xb4, 0xba, 0xfb, 0xaf, 0x05, 0x4b, 0x98, 0xa4, 0xe3, 0x48, 0x20, 0x04, 0x05, 0x4e,
0x86, 0xb6, 0xd5, 0xb4, 0x5a, 0xd5, 0x57, 0x0b, 0x58, 0x0a, 0x68, 0x1b, 0x8a, 0x9c, 0x0c, 0x53,
0x7b, 0xb1, 0x69, 0xb5, 0x6a, 0x5b, 0x1f, 0x7b, 0xd7, 0x5e, 0xb7, 0x87, 0xc9, 0xf0, 0x30, 0x48,
0x5e, 0x2d, 0x60, 0x05, 0x40, 0xdf, 0x40, 0x65, 0x44, 0x44, 0x30, 0x08, 0x44, 0x60, 0x43, 0xb3,
0xd0, 0xaa, 0x6d, 0xf9, 0x73, 0xc1, 0x92, 0x81, 0x77, 0x68, 0x10, 0x2f, 0x63, 0xc1, 0xcf, 0xf0,
0xc4, 0x80, 0xf3, 0x14, 0x96, 0x2f, 0x1c, 0xa1, 0x06, 0x14, 0x8e, 0xc9, 0x99, 0xa6, 0x8a, 0xe5,
0x12, 0xfd, 0x1f, 0x4a, 0x27, 0x41, 0x34, 0x26, 0x8a, 0x69, 0x1d, 0x6b, 0x61, 0x67, 0xf1, 0x89,
0xd5, 0xae, 0xc0, 0x12, 0x57, 0xe6, 0xdd, 0xdf, 0x54, 0xac, 0x92, 0x26, 0x7a, 0x6e, 0xe2, 0xb2,
0x14, 0xb5, 0x87, 0x37, 0xc6, 0x25, 0x3f, 0xa9, 0xa6, 0xa5, 0x80, 0xce, 0x36, 0x54, 0x27, 0x5b,
0x37, 0xd1, 0xa9, 0xe6, 0xe8, 0xb8, 0x02, 0x96, 0x31, 0x11, 0x63, 0x1e, 0x63, 0xf2, 0xe3, 0x98,
0xa4, 0x02, 0x7d, 0x99, 0xf1, 0x53, 0xf8, 0x9b, 0x2e, 0x59, 0x2a, 0x62, 0x03, 0x40, 0x2d, 0x28,
0x11, 0xce, 0x19, 0x37, 0xe9, 0x41, 0x9e, 0x2e, 0x54, 0x8f, 0x27, 0x7d, 0xef, 0x8d, 0x2a, 0x54,
0xac, 0x15, 0xdc, 0x06, 0xac, 0x64, 0x5e, 0xd3, 0x84, 0xc5, 0x29, 0x71, 0xff, 0xb0, 0xe0, 0x1e,
0x26, 0xaa, 0x4e, 0x0f, 0x46, 0x41, 0x48, 0x76, 0x59, 0x3c, 0xa4, 0x61, 0x46, 0xaa, 0x01, 0x05,
0x9c, 0xd5, 0x02, 0x96, 0x4b, 0xd4, 0x82, 0x4a, 0x37, 0x0a, 0xc4, 0x90, 0xf1, 0x91, 0x71, 0x57,
0xf7, 0x92, 0x9e, 0x97, 0xed, 0xe1, 0xc9, 0x29, 0x6a, 0x42, 0xcd, 0x18, 0x3e, 0x64, 0x03, 0x62,
0x17, 0x94, 0x8d, 0xfc, 0x16, 0xb2, 0xa1, 0xdc, 0x61, 0xe1, 0xb7, 0xc1, 0x88, 0xd8, 0x45, 0x75,
0x9a, 0x89, 0xee, 0x2f, 0x16, 0x38, 0x57, 0xb1, 0xd2, 0xa4, 0xd1, 0xd7, 0xb0, 0xb4, 0x47, 0x43,
0x92, 0xea, 0xbb, 0xaa, 0xb6, 0xb7, 0xde, 0x7f, 0xb8, 0xbf, 0xf0, 0xf7, 0x87, 0xfb, 0x1b, 0xb9,
0xa2, 0x67, 0x09, 0x89, 0xfb, 0x2c, 0x16, 0x01, 0x8d, 0x09, 0x97, 0xbd, 0xfb, 0x68, 0xa0, 0x20,
0x9e, 0x46, 0x62, 0x63, 0x01, 0x7d, 0x04, 0x4b, 0xda, 0xba, 0x29, 0x19, 0x23, 0xb9, 0xbf, 0x17,
0xa0, 0xfe, 0x46, 0x12, 0xc8, 0xee, 0xc2, 0x03, 0xd8, 0x23, 0x43, 0x1a, 0x53, 0x41, 0x59, 0x6c,
0x92, 0xb4, 0x22, 0x63, 0x9f, 0xee, 0xe2, 0x9c, 0x06, 0x72, 0xa0, 0xb2, 0x6f, 0x12, 0x66, 0xd2,
0x3f, 0x91, 0xd1, 0x3b, 0xa8, 0x65, 0xeb, 0xd7, 0x89, 0xb0, 0x0b, 0xaa, 0xfc, 0x9e, 0xcc, 0xc9,
0x78, 0x9e, 0x89, 0x97, 0x83, 0xea, 0x5a, 0xcc, 0x1b, 0x43, 0x2d, 0x58, 0x3d, 0x18, 0x25, 0x8c,
0x8b, 0xdd, 0xa0, 0x7f, 0x44, 0x64, 0x75, 0xda, 0xc5, 0x66, 0xa1, 0x55, 0xc5, 0xb3, 0xdb, 0x68,
0x13, 0xfe, 0x17, 0x44, 0x11, 0x3b, 0x35, 0xe5, 0xa4, 0x0a, 0xc3, 0x2e, 0x35, 0xad, 0x56, 0x05,
0x5f, 0x3e, 0x90, 0xb5, 0xbc, 0x4f, 0xe3, 0x20, 0xb2, 0x41, 0x69, 0x68, 0x01, 0xb9, 0x50, 0x7f,
0xf9, 0x93, 0x34, 0x4b, 0xf8, 0x0b, 0x21, 0xb8, 0x5d, 0x53, 0x97, 0x78, 0x61, 0xcf, 0x79, 0x06,
0x8d, 0x59, 0xca, 0x77, 0xea, 0x95, 0xef, 0x60, 0xd9, 0xc4, 0x6f, 0xf2, 0xdf, 0xc8, 0x8d, 0x28,
0x3d, 0xa0, 0xa6, 0xdd, 0x53, 0xb8, 0x63, 0xf7, 0xb8, 0x3f, 0xc3, 0x2a, 0x26, 0xc1, 0x60, 0x9f,
0x46, 0xe4, 0xfa, 0xb2, 0x97, 0xc9, 0xa4, 0x11, 0xe9, 0x06, 0xe2, 0x68, 0x92, 0x4c, 0x23, 0xa3,
0x1d, 0x28, 0xe1, 0x20, 0x0e, 0x89, 0x71, 0xfd, 0xc9, 0x1c, 0xd7, 0xca, 0x89, 0xd4, 0xc5, 0x1a,
0xe2, 0x3e, 0x85, 0xea, 0x64, 0x4f, 0x96, 0xe2, 0xeb, 0xe1, 0x30, 0x25, 0xba, 0xac, 0x0b, 0xd8,
0x48, 0x72, 0xbf, 0x43, 0xe2, 0xd0, 0xb8, 0x2e, 0x60, 0x23, 0xb9, 0xeb, 0xd0, 0x98, 0x32, 0x37,
0x57, 0x83, 0xa0, 0xb8, 0x27, 0x87, 0xad, 0xa5, 0xf2, 0xa0, 0xd6, 0xee, 0x40, 0x76, 0x7d, 0x30,
0xd8, 0xa3, 0xfc, 0xfa, 0x00, 0x6d, 0x28, 0xef, 0x51, 0x9e, 0x8b, 0x2f, 0x13, 0xd1, 0x3a, 0xac,
0x1c, 0xc4, 0xfd, 0x68, 0x3c, 0x90, 0xd1, 0x0a, 0xc2, 0x63, 0xd3, 0xca, 0x33, 0xbb, 0xee, 0x73,
0x7d, 0x8f, 0xca, 0x8b, 0x21, 0xb3, 0x09, 0x65, 0x12, 0x0b, 0x4e, 0x49, 0x36, 0x61, 0x91, 0xa7,
0x1f, 0x20, 0x4f, 0x3d, 0x40, 0x6a, 0x38, 0xe1, 0x4c, 0xc5, 0xdd, 0x86, 0x55, 0xb9, 0x31, 0x3f,
0x11, 0x08, 0x8a, 0x39, 0x92, 0x6a, 0xed, 0xee, 0x40, 0x63, 0x0a, 0x34, 0xae, 0xd7, 0xa1, 0x28,
0x9f, 0x37, 0xd3, 0xa7, 0x57, 0xf9, 0x55, 0xe7, 0xee, 0x32, 0xd4, 0xba, 0x34, 0xce, 0x06, 0x9e,
0x7b, 0x6e, 0x41, 0xbd, 0xcb, 0xe2, 0xe9, 0xa8, 0xe9, 0xc2, 0x6a, 0x56, 0xbb, 0x2f, 0xba, 0x07,
0xbb, 0x41, 0x92, 0x85, 0xd2, 0xbc, 0x9c, 0x66, 0xf3, 0x12, 0x7b, 0x5a, 0xb1, 0x5d, 0x94, 0x53,
0x09, 0xcf, 0xc2, 0xd1, 0x57, 0x50, 0xee, 0x74, 0xda, 0xca, 0xd2, 0xe2, 0x9d, 0x2c, 0x65, 0x30,
0xf4, 0x0c, 0xca, 0x6f, 0xd5, 0x0f, 0x42, 0x6a, 0x26, 0xc7, 0x15, 0x25, 0xa7, 0x03, 0xd5, 0x6a,
0x98, 0xf4, 0x19, 0x1f, 0xe0, 0x0c, 0xb4, 0xf5, 0x67, 0x09, 0xaa, 0x9d, 0x4e, 0xbb, 0xcd, 0xe9,
0x20, 0x24, 0xe8, 0x57, 0x0b, 0xd0, 0xe5, 0x59, 0x8b, 0x3e, 0x9f, 0xdf, 0x41, 0x57, 0x3f, 0x18,
0xce, 0x17, 0x77, 0x44, 0x99, 0x5b, 0x7e, 0x07, 0x25, 0xd5, 0xe1, 0xe8, 0xd3, 0x5b, 0xce, 0x40,
0xa7, 0x75, 0xb3, 0xa2, 0xb1, 0xdd, 0x87, 0x4a, 0xd6, 0x25, 0x68, 0x63, 0x2e, 0xbd, 0x0b, 0x43,
0xc0, 0x79, 0x78, 0x2b, 0x5d, 0xe3, 0xe4, 0x07, 0x28, 0x9b, 0xe2, 0x47, 0x0f, 0x6e, 0xc0, 0x4d,
0xdb, 0xd0, 0xd9, 0xb8, 0x8d, 0xea, 0x34, 0x8c, 0xac, 0xc8, 0xe7, 0x86, 0x31, 0xd3, 0x42, 0x73,
0xc3, 0xb8, 0xd4, 0x35, 0x6f, 0xa1, 0x28, 0xbb, 0x01, 0xad, 0xcf, 0x01, 0xe5, 0xda, 0xc5, 0x99,
0x97, 0xae, 0x0b, 0x6d, 0xf4, 0xbd, 0xfc, 0xe5, 0x52, 0xcf, 0x48, 0x6b, 0x6e, 0xcc, 0xb9, 0x3f,
0x22, 0xe7, 0xc1, 0x2d, 0x34, 0xb5, 0xf9, 0x76, 0xfd, 0xfd, 0xf9, 0x9a, 0xf5, 0xd7, 0xf9, 0x9a,
0xf5, 0xcf, 0xf9, 0x9a, 0xd5, 0x5b, 0x52, 0xff, 0xb4, 0x8f, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff,
0x80, 0x7e, 0xd2, 0xb5, 0x25, 0x0c, 0x00, 0x00,
}

View File

@ -7,6 +7,7 @@ import "github.com/gogo/googleapis/google/rpc/status.proto";
import "github.com/moby/buildkit/solver/pb/ops.proto";
import "github.com/moby/buildkit/api/types/worker.proto";
import "github.com/moby/buildkit/util/apicaps/pb/caps.proto";
import "github.com/tonistiigi/fsutil/types/stat.proto";
option (gogoproto.sizer_all) = true;
option (gogoproto.marshaler_all) = true;
@ -19,6 +20,10 @@ service LLBBridge {
rpc Solve(SolveRequest) returns (SolveResponse);
// apicaps:CapReadFile
rpc ReadFile(ReadFileRequest) returns (ReadFileResponse);
// apicaps:CapReadDir
rpc ReadDir(ReadDirRequest) returns (ReadDirResponse);
// apicaps:CapStatFile
rpc StatFile(StatFileRequest) returns (StatFileResponse);
rpc Ping(PingRequest) returns (PongResponse);
rpc Return(ReturnRequest) returns (ReturnResponse);
}
@ -92,6 +97,25 @@ message ReadFileResponse {
bytes Data = 1;
}
message ReadDirRequest {
string Ref = 1;
string DirPath = 2;
string IncludePattern = 3;
}
message ReadDirResponse {
repeated fsutil.types.Stat entries = 1;
}
message StatFileRequest {
string Ref = 1;
string Path = 2;
}
message StatFileResponse {
fsutil.types.Stat stat = 1;
}
message PingRequest{
}
message PongResponse{

View File

@ -9,6 +9,7 @@ import (
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/tonistiigi/fsutil"
fstypes "github.com/tonistiigi/fsutil/types"
"google.golang.org/grpc"
)
@ -81,10 +82,10 @@ func syncTargetDiffCopy(ds grpc.Stream, dest string) error {
}
return fsutil.Receive(ds.Context(), ds, dest, fsutil.ReceiveOpt{
Merge: true,
Filter: func() func(*fsutil.Stat) bool {
Filter: func() func(*fstypes.Stat) bool {
uid := os.Getuid()
gid := os.Getgid()
return func(st *fsutil.Stat) bool {
return func(st *fstypes.Stat) bool {
st.Uid = uint32(uid)
st.Gid = uint32(gid)
return true

View File

@ -10,6 +10,7 @@ import (
"github.com/moby/buildkit/session"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil"
fstypes "github.com/tonistiigi/fsutil/types"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
@ -34,7 +35,7 @@ type SyncedDir struct {
Name string
Dir string
Excludes []string
Map func(*fsutil.Stat) bool
Map func(*fstypes.Stat) bool
}
// NewFSSyncProvider creates a new provider for sending files from client

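A client-side sketch of wiring a local directory into a session with the fstypes-based Map hook (path and option values are illustrative):

    // assumes: import ("github.com/moby/buildkit/session"; "github.com/moby/buildkit/session/filesync"; fstypes "github.com/tonistiigi/fsutil/types")
    func newContextProvider() session.Attachable {
        dirs := []filesync.SyncedDir{{
            Name:     "context",
            Dir:      "/path/to/build/context",
            Excludes: []string{".git"},
            Map: func(st *fstypes.Stat) bool {
                // normalize ownership before sending, similar to the resetUIDAndGID filter earlier
                st.Uid, st.Gid = 0, 0
                return true
            },
        }}
        return filesync.NewFSSyncProvider(dirs)
    }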
View File

@ -7,6 +7,7 @@ import (
"google.golang.org/grpc/metadata"
)
// Hijack hijacks session to a connection.
func Hijack(stream controlapi.Control_SessionServer) (net.Conn, <-chan struct{}, map[string][]string) {
md, _ := metadata.FromIncomingContext(stream.Context())
c, closeCh := streamToConn(stream)

View File

@ -0,0 +1,61 @@
package sshforward
import (
io "io"
context "golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
)
func Copy(ctx context.Context, conn io.ReadWriteCloser, stream grpc.Stream) error {
g, ctx := errgroup.WithContext(ctx)
g.Go(func() (retErr error) {
p := &BytesMessage{}
for {
if err := stream.RecvMsg(p); err != nil {
if err == io.EOF {
return nil
}
conn.Close()
return err
}
select {
case <-ctx.Done():
conn.Close()
return ctx.Err()
default:
}
if _, err := conn.Write(p.Data); err != nil {
conn.Close()
return err
}
p.Data = p.Data[:0]
}
})
g.Go(func() (retErr error) {
for {
buf := make([]byte, 32*1024)
n, err := conn.Read(buf)
switch {
case err == io.EOF:
return nil
case err != nil:
return err
}
select {
case <-ctx.Done():
return ctx.Err()
default:
}
p := &BytesMessage{Data: buf[:n]}
if err := stream.SendMsg(p); err != nil {
return err
}
}
})
return g.Wait()
}

View File

@ -0,0 +1,3 @@
package sshforward
//go:generate protoc --gogoslick_out=plugins=grpc:. ssh.proto

View File

@ -0,0 +1,113 @@
package sshforward
import (
"io/ioutil"
"net"
"os"
"path/filepath"
"github.com/moby/buildkit/session"
context "golang.org/x/net/context"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc/metadata"
)
// DefaultID is the default ssh ID
const DefaultID = "default"
const KeySSHID = "buildkit.ssh.id"
type server struct {
caller session.Caller
}
func (s *server) run(ctx context.Context, l net.Listener, id string) error {
eg, ctx := errgroup.WithContext(ctx)
eg.Go(func() error {
<-ctx.Done()
return ctx.Err()
})
eg.Go(func() error {
for {
conn, err := l.Accept()
if err != nil {
return err
}
client := NewSSHClient(s.caller.Conn())
opts := make(map[string][]string)
opts[KeySSHID] = []string{id}
ctx = metadata.NewOutgoingContext(ctx, opts)
stream, err := client.ForwardAgent(ctx)
if err != nil {
conn.Close()
return err
}
go Copy(ctx, conn, stream)
}
})
return eg.Wait()
}
type SocketOpt struct {
ID string
UID int
GID int
Mode int
}
func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockPath string, closer func() error, err error) {
dir, err := ioutil.TempDir("", ".buildkit-ssh-sock")
if err != nil {
return "", nil, err
}
defer func() {
if err != nil {
os.RemoveAll(dir)
}
}()
sockPath = filepath.Join(dir, "ssh_auth_sock")
l, err := net.Listen("unix", sockPath)
if err != nil {
return "", nil, err
}
if err := os.Chown(sockPath, opt.UID, opt.GID); err != nil {
l.Close()
return "", nil, err
}
if err := os.Chmod(sockPath, os.FileMode(opt.Mode)); err != nil {
l.Close()
return "", nil, err
}
s := &server{caller: c}
id := opt.ID
if id == "" {
id = DefaultID
}
go s.run(ctx, l, id) // erroring per connection allowed
return sockPath, func() error {
err := l.Close()
os.RemoveAll(sockPath)
return err
}, nil
}
func CheckSSHID(ctx context.Context, c session.Caller, id string) error {
client := NewSSHClient(c.Conn())
_, err := client.CheckAgent(ctx, &CheckAgentRequest{ID: id})
return err
}
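On the daemon side, the expected flow is to verify that the client forwards an agent under the requested ID, mount a proxy socket, and expose its path to the build process (typically as SSH_AUTH_SOCK). A sketch with illustrative option values, assuming a session.Caller named caller:

    // assumes: import ("context"; "github.com/moby/buildkit/session"; "github.com/moby/buildkit/session/sshforward")
    func setupSSHSocket(ctx context.Context, caller session.Caller) (string, func() error, error) {
        if err := sshforward.CheckSSHID(ctx, caller, sshforward.DefaultID); err != nil {
            // the client did not expose an agent under this ID
            return "", nil, err
        }
        return sshforward.MountSSHSocket(ctx, caller, sshforward.SocketOpt{
            ID:   sshforward.DefaultID,
            UID:  0,
            GID:  0,
            Mode: 0600,
        })
    }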

View File

@ -0,0 +1,816 @@
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: ssh.proto
/*
Package sshforward is a generated protocol buffer package.
It is generated from these files:
ssh.proto
It has these top-level messages:
BytesMessage
CheckAgentRequest
CheckAgentResponse
*/
package sshforward
import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import bytes "bytes"
import strings "strings"
import reflect "reflect"
import context "golang.org/x/net/context"
import grpc "google.golang.org/grpc"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package
// BytesMessage contains a chunk of byte data
type BytesMessage struct {
Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
}
func (m *BytesMessage) Reset() { *m = BytesMessage{} }
func (*BytesMessage) ProtoMessage() {}
func (*BytesMessage) Descriptor() ([]byte, []int) { return fileDescriptorSsh, []int{0} }
func (m *BytesMessage) GetData() []byte {
if m != nil {
return m.Data
}
return nil
}
type CheckAgentRequest struct {
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
}
func (m *CheckAgentRequest) Reset() { *m = CheckAgentRequest{} }
func (*CheckAgentRequest) ProtoMessage() {}
func (*CheckAgentRequest) Descriptor() ([]byte, []int) { return fileDescriptorSsh, []int{1} }
func (m *CheckAgentRequest) GetID() string {
if m != nil {
return m.ID
}
return ""
}
type CheckAgentResponse struct {
}
func (m *CheckAgentResponse) Reset() { *m = CheckAgentResponse{} }
func (*CheckAgentResponse) ProtoMessage() {}
func (*CheckAgentResponse) Descriptor() ([]byte, []int) { return fileDescriptorSsh, []int{2} }
func init() {
proto.RegisterType((*BytesMessage)(nil), "moby.sshforward.v1.BytesMessage")
proto.RegisterType((*CheckAgentRequest)(nil), "moby.sshforward.v1.CheckAgentRequest")
proto.RegisterType((*CheckAgentResponse)(nil), "moby.sshforward.v1.CheckAgentResponse")
}
func (this *BytesMessage) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*BytesMessage)
if !ok {
that2, ok := that.(BytesMessage)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if !bytes.Equal(this.Data, that1.Data) {
return false
}
return true
}
func (this *CheckAgentRequest) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*CheckAgentRequest)
if !ok {
that2, ok := that.(CheckAgentRequest)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
if this.ID != that1.ID {
return false
}
return true
}
func (this *CheckAgentResponse) Equal(that interface{}) bool {
if that == nil {
return this == nil
}
that1, ok := that.(*CheckAgentResponse)
if !ok {
that2, ok := that.(CheckAgentResponse)
if ok {
that1 = &that2
} else {
return false
}
}
if that1 == nil {
return this == nil
} else if this == nil {
return false
}
return true
}
func (this *BytesMessage) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&sshforward.BytesMessage{")
s = append(s, "Data: "+fmt.Sprintf("%#v", this.Data)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *CheckAgentRequest) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 5)
s = append(s, "&sshforward.CheckAgentRequest{")
s = append(s, "ID: "+fmt.Sprintf("%#v", this.ID)+",\n")
s = append(s, "}")
return strings.Join(s, "")
}
func (this *CheckAgentResponse) GoString() string {
if this == nil {
return "nil"
}
s := make([]string, 0, 4)
s = append(s, "&sshforward.CheckAgentResponse{")
s = append(s, "}")
return strings.Join(s, "")
}
func valueToGoStringSsh(v interface{}, typ string) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv)
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for SSH service
type SSHClient interface {
CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error)
ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error)
}
type sSHClient struct {
cc *grpc.ClientConn
}
func NewSSHClient(cc *grpc.ClientConn) SSHClient {
return &sSHClient{cc}
}
func (c *sSHClient) CheckAgent(ctx context.Context, in *CheckAgentRequest, opts ...grpc.CallOption) (*CheckAgentResponse, error) {
out := new(CheckAgentResponse)
err := grpc.Invoke(ctx, "/moby.sshforward.v1.SSH/CheckAgent", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *sSHClient) ForwardAgent(ctx context.Context, opts ...grpc.CallOption) (SSH_ForwardAgentClient, error) {
stream, err := grpc.NewClientStream(ctx, &_SSH_serviceDesc.Streams[0], c.cc, "/moby.sshforward.v1.SSH/ForwardAgent", opts...)
if err != nil {
return nil, err
}
x := &sSHForwardAgentClient{stream}
return x, nil
}
type SSH_ForwardAgentClient interface {
Send(*BytesMessage) error
Recv() (*BytesMessage, error)
grpc.ClientStream
}
type sSHForwardAgentClient struct {
grpc.ClientStream
}
func (x *sSHForwardAgentClient) Send(m *BytesMessage) error {
return x.ClientStream.SendMsg(m)
}
func (x *sSHForwardAgentClient) Recv() (*BytesMessage, error) {
m := new(BytesMessage)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Server API for SSH service
type SSHServer interface {
CheckAgent(context.Context, *CheckAgentRequest) (*CheckAgentResponse, error)
ForwardAgent(SSH_ForwardAgentServer) error
}
func RegisterSSHServer(s *grpc.Server, srv SSHServer) {
s.RegisterService(&_SSH_serviceDesc, srv)
}
func _SSH_CheckAgent_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(CheckAgentRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(SSHServer).CheckAgent(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/moby.sshforward.v1.SSH/CheckAgent",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(SSHServer).CheckAgent(ctx, req.(*CheckAgentRequest))
}
return interceptor(ctx, in, info, handler)
}
func _SSH_ForwardAgent_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(SSHServer).ForwardAgent(&sSHForwardAgentServer{stream})
}
type SSH_ForwardAgentServer interface {
Send(*BytesMessage) error
Recv() (*BytesMessage, error)
grpc.ServerStream
}
type sSHForwardAgentServer struct {
grpc.ServerStream
}
func (x *sSHForwardAgentServer) Send(m *BytesMessage) error {
return x.ServerStream.SendMsg(m)
}
func (x *sSHForwardAgentServer) Recv() (*BytesMessage, error) {
m := new(BytesMessage)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _SSH_serviceDesc = grpc.ServiceDesc{
ServiceName: "moby.sshforward.v1.SSH",
HandlerType: (*SSHServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "CheckAgent",
Handler: _SSH_CheckAgent_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ForwardAgent",
Handler: _SSH_ForwardAgent_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "ssh.proto",
}
func (m *BytesMessage) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *BytesMessage) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.Data) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintSsh(dAtA, i, uint64(len(m.Data)))
i += copy(dAtA[i:], m.Data)
}
return i, nil
}
func (m *CheckAgentRequest) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CheckAgentRequest) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintSsh(dAtA, i, uint64(len(m.ID)))
i += copy(dAtA[i:], m.ID)
}
return i, nil
}
func (m *CheckAgentResponse) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *CheckAgentResponse) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
return i, nil
}
func encodeVarintSsh(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
dAtA[offset] = uint8(v)
return offset + 1
}
func (m *BytesMessage) Size() (n int) {
var l int
_ = l
l = len(m.Data)
if l > 0 {
n += 1 + l + sovSsh(uint64(l))
}
return n
}
func (m *CheckAgentRequest) Size() (n int) {
var l int
_ = l
l = len(m.ID)
if l > 0 {
n += 1 + l + sovSsh(uint64(l))
}
return n
}
func (m *CheckAgentResponse) Size() (n int) {
var l int
_ = l
return n
}
func sovSsh(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozSsh(x uint64) (n int) {
return sovSsh(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (this *BytesMessage) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&BytesMessage{`,
`Data:` + fmt.Sprintf("%v", this.Data) + `,`,
`}`,
}, "")
return s
}
func (this *CheckAgentRequest) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CheckAgentRequest{`,
`ID:` + fmt.Sprintf("%v", this.ID) + `,`,
`}`,
}, "")
return s
}
func (this *CheckAgentResponse) String() string {
if this == nil {
return "nil"
}
s := strings.Join([]string{`&CheckAgentResponse{`,
`}`,
}, "")
return s
}
func valueToStringSsh(v interface{}) string {
rv := reflect.ValueOf(v)
if rv.IsNil() {
return "nil"
}
pv := reflect.Indirect(rv).Interface()
return fmt.Sprintf("*%v", pv)
}
func (m *BytesMessage) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSsh
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: BytesMessage: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: BytesMessage: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Data", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSsh
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthSsh
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Data = append(m.Data[:0], dAtA[iNdEx:postIndex]...)
if m.Data == nil {
m.Data = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSsh(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSsh
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CheckAgentRequest) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSsh
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CheckAgentRequest: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CheckAgentRequest: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSsh
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthSsh
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipSsh(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSsh
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CheckAgentResponse) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowSsh
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: CheckAgentResponse: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: CheckAgentResponse: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
default:
iNdEx = preIndex
skippy, err := skipSsh(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthSsh
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipSsh(dAtA []byte) (n int, err error) {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSsh
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSsh
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if dAtA[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSsh
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthSsh
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowSsh
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipSsh(dAtA[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthSsh = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowSsh = fmt.Errorf("proto: integer overflow")
)
func init() { proto.RegisterFile("ssh.proto", fileDescriptorSsh) }
var fileDescriptorSsh = []byte{
// 243 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0xe2, 0x2c, 0x2e, 0xce, 0xd0,
0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0x12, 0xca, 0xcd, 0x4f, 0xaa, 0xd4, 0x2b, 0x2e, 0xce, 0x48,
0xcb, 0x2f, 0x2a, 0x4f, 0x2c, 0x4a, 0xd1, 0x2b, 0x33, 0x54, 0x52, 0xe2, 0xe2, 0x71, 0xaa, 0x2c,
0x49, 0x2d, 0xf6, 0x4d, 0x2d, 0x2e, 0x4e, 0x4c, 0x4f, 0x15, 0x12, 0xe2, 0x62, 0x49, 0x49, 0x2c,
0x49, 0x94, 0x60, 0x54, 0x60, 0xd4, 0xe0, 0x09, 0x02, 0xb3, 0x95, 0x94, 0xb9, 0x04, 0x9d, 0x33,
0x52, 0x93, 0xb3, 0x1d, 0xd3, 0x53, 0xf3, 0x4a, 0x82, 0x52, 0x0b, 0x4b, 0x53, 0x8b, 0x4b, 0x84,
0xf8, 0xb8, 0x98, 0x3c, 0x5d, 0xc0, 0xca, 0x38, 0x83, 0x98, 0x3c, 0x5d, 0x94, 0x44, 0xb8, 0x84,
0x90, 0x15, 0x15, 0x17, 0xe4, 0xe7, 0x15, 0xa7, 0x1a, 0xed, 0x62, 0xe4, 0x62, 0x0e, 0x0e, 0xf6,
0x10, 0x8a, 0xe6, 0xe2, 0x42, 0xc8, 0x0a, 0xa9, 0xea, 0x61, 0xba, 0x44, 0x0f, 0xc3, 0x0a, 0x29,
0x35, 0x42, 0xca, 0x20, 0x96, 0x08, 0x85, 0x71, 0xf1, 0xb8, 0x41, 0x14, 0x40, 0x8c, 0x57, 0xc0,
0xa6, 0x0f, 0xd9, 0x97, 0x52, 0x04, 0x55, 0x68, 0x30, 0x1a, 0x30, 0x3a, 0x59, 0x5c, 0x78, 0x28,
0xc7, 0x70, 0xe3, 0xa1, 0x1c, 0xc3, 0x87, 0x87, 0x72, 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c,
0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f,
0x3c, 0x92, 0x63, 0xf8, 0xf0, 0x48, 0x8e, 0x71, 0xc2, 0x63, 0x39, 0x86, 0x28, 0x2e, 0x84, 0x69,
0x49, 0x6c, 0xe0, 0x00, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x31, 0x3e, 0x40, 0xab, 0x7d,
0x01, 0x00, 0x00,
}

View File

@ -0,0 +1,22 @@
syntax = "proto3";
package moby.sshforward.v1;
option go_package = "sshforward";
service SSH {
rpc CheckAgent(CheckAgentRequest) returns (CheckAgentResponse);
rpc ForwardAgent(stream BytesMessage) returns (stream BytesMessage);
}
// BytesMessage contains a chunk of byte data
message BytesMessage{
bytes data = 1;
}
message CheckAgentRequest {
string ID = 1;
}
message CheckAgentResponse {
}
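
A quick orientation for the proto above, as a minimal sketch that is not part of this diff: it assumes the standard generated gRPC bindings that accompany RegisterSSHServer (an SSHClient interface with a NewSSHClient constructor) and an already established *grpc.ClientConn; those names are assumptions, not shown in this file view.

// Sketch only: assumes sshforward.NewSSHClient and SSHClient.CheckAgent exist
// as the usual generated counterparts of RegisterSSHServer.
package example

import (
	"context"

	"github.com/moby/buildkit/session/sshforward"
	"google.golang.org/grpc"
)

// checkAgent asks the provider whether an agent with the given ID is exposed.
// An empty ID is resolved to the default agent on the server side.
func checkAgent(ctx context.Context, conn *grpc.ClientConn, id string) error {
	client := sshforward.NewSSHClient(conn)
	_, err := client.CheckAgent(ctx, &sshforward.CheckAgentRequest{ID: id})
	return err
}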

View File

@ -0,0 +1,198 @@
package sshprovider
import (
"context"
"io"
"io/ioutil"
"net"
"os"
"time"
"github.com/moby/buildkit/session"
"github.com/moby/buildkit/session/sshforward"
"github.com/pkg/errors"
"golang.org/x/crypto/ssh"
"golang.org/x/crypto/ssh/agent"
"golang.org/x/sync/errgroup"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
// AgentConfig is the config for a single exposed SSH agent
type AgentConfig struct {
ID string
Paths []string
}
// NewSSHAgentProvider creates a session provider that allows access to an SSH agent
func NewSSHAgentProvider(confs []AgentConfig) (session.Attachable, error) {
m := map[string]source{}
for _, conf := range confs {
if len(conf.Paths) == 0 || len(conf.Paths) == 1 && conf.Paths[0] == "" {
conf.Paths = []string{os.Getenv("SSH_AUTH_SOCK")}
}
if conf.Paths[0] == "" {
return nil, errors.Errorf("invalid empty ssh agent socket, make sure SSH_AUTH_SOCK is set")
}
src, err := toAgentSource(conf.Paths)
if err != nil {
return nil, err
}
if conf.ID == "" {
conf.ID = sshforward.DefaultID
}
if _, ok := m[conf.ID]; ok {
return nil, errors.Errorf("invalid duplicate ID %s", conf.ID)
}
m[conf.ID] = src
}
return &socketProvider{m: m}, nil
}
type source struct {
agent agent.Agent
socket string
}
type socketProvider struct {
m map[string]source
}
func (sp *socketProvider) Register(server *grpc.Server) {
sshforward.RegisterSSHServer(server, sp)
}
func (sp *socketProvider) CheckAgent(ctx context.Context, req *sshforward.CheckAgentRequest) (*sshforward.CheckAgentResponse, error) {
id := sshforward.DefaultID
if req.ID != "" {
id = req.ID
}
if _, ok := sp.m[id]; !ok {
return &sshforward.CheckAgentResponse{}, errors.Errorf("unset ssh forward key %s", id)
}
return &sshforward.CheckAgentResponse{}, nil
}
func (sp *socketProvider) ForwardAgent(stream sshforward.SSH_ForwardAgentServer) error {
id := sshforward.DefaultID
opts, _ := metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object
if v, ok := opts[sshforward.KeySSHID]; ok && len(v) > 0 && v[0] != "" {
id = v[0]
}
src, ok := sp.m[id]
if !ok {
return errors.Errorf("unset ssh forward key %s", id)
}
var a agent.Agent
if src.socket != "" {
conn, err := net.DialTimeout("unix", src.socket, time.Second)
if err != nil {
return errors.Wrapf(err, "failed to connect to %s", src.socket)
}
a = &readOnlyAgent{agent.NewClient(conn)}
defer conn.Close()
} else {
a = src.agent
}
s1, s2 := sockPair()
eg, ctx := errgroup.WithContext(context.TODO())
eg.Go(func() error {
return agent.ServeAgent(a, s1)
})
eg.Go(func() error {
defer s1.Close()
return sshforward.Copy(ctx, s2, stream)
})
return eg.Wait()
}
func toAgentSource(paths []string) (source, error) {
var keys bool
var socket string
a := agent.NewKeyring()
for _, p := range paths {
if socket != "" {
return source{}, errors.New("only single socket allowed")
}
fi, err := os.Stat(p)
if err != nil {
return source{}, errors.WithStack(err)
}
if fi.Mode()&os.ModeSocket > 0 {
if keys {
return source{}, errors.Errorf("invalid combination of keys and sockets")
}
socket = p
continue
}
keys = true
f, err := os.Open(p)
if err != nil {
return source{}, errors.Wrapf(err, "failed to open %s", p)
}
dt, err := ioutil.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
if err != nil {
return source{}, errors.Wrapf(err, "failed to read %s", p)
}
k, err := ssh.ParseRawPrivateKey(dt)
if err != nil {
return source{}, errors.Wrapf(err, "failed to parse %s", p) // TODO: prompt passphrase?
}
if err := a.Add(agent.AddedKey{PrivateKey: k}); err != nil {
return source{}, errors.Wrapf(err, "failed to add %s to agent", p)
}
}
if socket != "" {
return source{socket: socket}, nil
}
return source{agent: a}, nil
}
func sockPair() (io.ReadWriteCloser, io.ReadWriteCloser) {
pr1, pw1 := io.Pipe()
pr2, pw2 := io.Pipe()
return &sock{pr1, pw2, pw1}, &sock{pr2, pw1, pw2}
}
type sock struct {
io.Reader
io.Writer
io.Closer
}
type readOnlyAgent struct {
agent.Agent
}
func (a *readOnlyAgent) Add(_ agent.AddedKey) error {
return errors.Errorf("adding new keys not allowed by buildkit")
}
func (a *readOnlyAgent) Remove(_ ssh.PublicKey) error {
return errors.Errorf("removing keys not allowed by buildkit")
}
func (a *readOnlyAgent) RemoveAll() error {
return errors.Errorf("removing keys not allowed by buildkit")
}
func (a *readOnlyAgent) Lock(_ []byte) error {
return errors.Errorf("locking agent not allowed by buildkit")
}
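
A minimal usage sketch for the provider above, not part of this diff. It assumes the session API of this buildkit vintage (session.NewSession and (*Session).Allow) and that the package shown here lives at github.com/moby/buildkit/session/sshforward/sshprovider; both are assumptions about code outside this file view.

// Sketch only: session.NewSession / Session.Allow signatures are assumed.
package example

import (
	"context"

	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/sshforward/sshprovider"
)

func newSessionWithAgent(ctx context.Context) (*session.Session, error) {
	// Leaving ID and Paths empty makes the provider fall back to
	// sshforward.DefaultID and $SSH_AUTH_SOCK, as in the code above.
	provider, err := sshprovider.NewSSHAgentProvider([]sshprovider.AgentConfig{{}})
	if err != nil {
		return nil, err
	}
	s, err := session.NewSession(ctx, "example", "")
	if err != nil {
		return nil, err
	}
	s.Allow(provider)
	return s, nil
}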

View File

@ -30,15 +30,18 @@ const (
CapBuildOpLLBFileName apicaps.CapID = "source.buildop.llbfilename"
CapExecMetaBase apicaps.CapID = "exec.meta.base"
CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv"
CapExecMetaNetwork apicaps.CapID = "exec.meta.network"
CapExecMountBind apicaps.CapID = "exec.mount.bind"
CapExecMountCache apicaps.CapID = "exec.mount.cache"
CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing"
CapExecMountSelector apicaps.CapID = "exec.mount.selector"
CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs"
CapMountSecret apicaps.CapID = "exec.mount.secret"
CapExecMetaBase apicaps.CapID = "exec.meta.base"
CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv"
CapExecMetaNetwork apicaps.CapID = "exec.meta.network"
CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath"
CapExecMountBind apicaps.CapID = "exec.mount.bind"
CapExecMountCache apicaps.CapID = "exec.mount.cache"
CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing"
CapExecMountSelector apicaps.CapID = "exec.mount.selector"
CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs"
CapExecMountSecret apicaps.CapID = "exec.mount.secret"
CapExecMountSSH apicaps.CapID = "exec.mount.ssh"
CapExecCgroupsMounted apicaps.CapID = "exec.cgroup"
CapConstraints apicaps.CapID = "constraints"
CapPlatform apicaps.CapID = "platform"
@ -169,6 +172,12 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMetaSetsDefaultPath,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMountBind,
Enabled: true,
@ -200,7 +209,19 @@ func init() {
})
Caps.Init(apicaps.Cap{
ID: CapMountSecret,
ID: CapExecMountSecret,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMountSSH,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecCgroupsMounted,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
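
A hedged illustration, not from this diff, of the kind of gating the new CapExecMountSSH constant enables. hasCap is a hypothetical stand-in for whatever capability lookup a client performs against the worker's advertised capability set.

// Sketch only: hasCap is a hypothetical helper, not a buildkit API.
package example

import (
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/apicaps"
)

// canUseSSHMount reports whether an SSH agent socket mount may be emitted.
func canUseSSHMount(hasCap func(apicaps.CapID) bool) bool {
	return hasCap(pb.CapExecMountSSH)
}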

View File

@ -1,12 +1,25 @@
package pb
// InputIndex is an incrementing index referring to an input vertex
type InputIndex int64
// OutputIndex is an incrementing index that another vertex can depend on
type OutputIndex int64
// RootMount is a base mountpoint
const RootMount = "/"
// SkipOutput marks a disabled output index
const SkipOutput OutputIndex = -1
// Empty marks an input with no content
const Empty InputIndex = -1
// LLBBuilder is a special builder for BuildOp that directly builds LLB
const LLBBuilder InputIndex = -1
// LLBDefinitionInput marks an input that contains LLB definition for BuildOp
const LLBDefinitionInput = "buildkit.llb.definition"
// LLBDefaultDefinitionFile is a filename containing the definition in LLBBuilder
const LLBDefaultDefinitionFile = LLBDefinitionInput

View File

@ -19,6 +19,7 @@
Mount
CacheOpt
SecretOpt
SSHOpt
CopyOp
CopySource
SourceOp
@ -41,6 +42,8 @@ import _ "github.com/gogo/protobuf/gogoproto"
import github_com_opencontainers_go_digest "github.com/opencontainers/go-digest"
import github_com_moby_buildkit_util_apicaps "github.com/moby/buildkit/util/apicaps"
import sortkeys "github.com/gogo/protobuf/sortkeys"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
@ -512,6 +515,7 @@ type Mount struct {
MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"`
CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt" json:"cacheOpt,omitempty"`
SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt" json:"secretOpt,omitempty"`
SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt" json:"SSHOpt,omitempty"`
}
func (m *Mount) Reset() { *m = Mount{} }
@ -561,6 +565,13 @@ func (m *Mount) GetSecretOpt() *SecretOpt {
return nil
}
func (m *Mount) GetSSHOpt() *SSHOpt {
if m != nil {
return m.SSHOpt
}
return nil
}
// CacheOpt defines options specific to cache mounts
type CacheOpt struct {
// ID is an optional namespace for the mount
@ -643,6 +654,61 @@ func (m *SecretOpt) GetOptional() bool {
return false
}
// SSHOpt defines options describing ssh agent socket mounts
type SSHOpt struct {
// ID of exposed ssh rule. Used for querying the value.
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
// UID of agent socket
Uid uint32 `protobuf:"varint,2,opt,name=uid,proto3" json:"uid,omitempty"`
// GID of agent socket
Gid uint32 `protobuf:"varint,3,opt,name=gid,proto3" json:"gid,omitempty"`
// Mode is the filesystem mode of agent socket
Mode uint32 `protobuf:"varint,4,opt,name=mode,proto3" json:"mode,omitempty"`
// Optional defines whether the ssh socket is optional rather than required.
// If it is not optional, an error is produced when the client does not expose ssh.
Optional bool `protobuf:"varint,5,opt,name=optional,proto3" json:"optional,omitempty"`
}
func (m *SSHOpt) Reset() { *m = SSHOpt{} }
func (m *SSHOpt) String() string { return proto.CompactTextString(m) }
func (*SSHOpt) ProtoMessage() {}
func (*SSHOpt) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{8} }
func (m *SSHOpt) GetID() string {
if m != nil {
return m.ID
}
return ""
}
func (m *SSHOpt) GetUid() uint32 {
if m != nil {
return m.Uid
}
return 0
}
func (m *SSHOpt) GetGid() uint32 {
if m != nil {
return m.Gid
}
return 0
}
func (m *SSHOpt) GetMode() uint32 {
if m != nil {
return m.Mode
}
return 0
}
func (m *SSHOpt) GetOptional() bool {
if m != nil {
return m.Optional
}
return false
}
// CopyOp copies files across Ops.
type CopyOp struct {
Src []*CopySource `protobuf:"bytes,1,rep,name=src" json:"src,omitempty"`
@ -652,7 +718,7 @@ type CopyOp struct {
func (m *CopyOp) Reset() { *m = CopyOp{} }
func (m *CopyOp) String() string { return proto.CompactTextString(m) }
func (*CopyOp) ProtoMessage() {}
func (*CopyOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{8} }
func (*CopyOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{9} }
func (m *CopyOp) GetSrc() []*CopySource {
if m != nil {
@ -677,7 +743,7 @@ type CopySource struct {
func (m *CopySource) Reset() { *m = CopySource{} }
func (m *CopySource) String() string { return proto.CompactTextString(m) }
func (*CopySource) ProtoMessage() {}
func (*CopySource) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{9} }
func (*CopySource) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{10} }
func (m *CopySource) GetSelector() string {
if m != nil {
@ -698,7 +764,7 @@ type SourceOp struct {
func (m *SourceOp) Reset() { *m = SourceOp{} }
func (m *SourceOp) String() string { return proto.CompactTextString(m) }
func (*SourceOp) ProtoMessage() {}
func (*SourceOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{10} }
func (*SourceOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{11} }
func (m *SourceOp) GetIdentifier() string {
if m != nil {
@ -726,7 +792,7 @@ type BuildOp struct {
func (m *BuildOp) Reset() { *m = BuildOp{} }
func (m *BuildOp) String() string { return proto.CompactTextString(m) }
func (*BuildOp) ProtoMessage() {}
func (*BuildOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{11} }
func (*BuildOp) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{12} }
func (m *BuildOp) GetInputs() map[string]*BuildInput {
if m != nil {
@ -757,7 +823,7 @@ type BuildInput struct {
func (m *BuildInput) Reset() { *m = BuildInput{} }
func (m *BuildInput) String() string { return proto.CompactTextString(m) }
func (*BuildInput) ProtoMessage() {}
func (*BuildInput) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{12} }
func (*BuildInput) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{13} }
// OpMetadata is a per-vertex metadata entry, which can be defined for an arbitrary Op vertex and overridden at run time.
type OpMetadata struct {
@ -774,7 +840,7 @@ type OpMetadata struct {
func (m *OpMetadata) Reset() { *m = OpMetadata{} }
func (m *OpMetadata) String() string { return proto.CompactTextString(m) }
func (*OpMetadata) ProtoMessage() {}
func (*OpMetadata) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{13} }
func (*OpMetadata) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{14} }
func (m *OpMetadata) GetIgnoreCache() bool {
if m != nil {
@ -811,7 +877,7 @@ type ExportCache struct {
func (m *ExportCache) Reset() { *m = ExportCache{} }
func (m *ExportCache) String() string { return proto.CompactTextString(m) }
func (*ExportCache) ProtoMessage() {}
func (*ExportCache) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{14} }
func (*ExportCache) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{15} }
func (m *ExportCache) GetValue() bool {
if m != nil {
@ -830,7 +896,7 @@ type ProxyEnv struct {
func (m *ProxyEnv) Reset() { *m = ProxyEnv{} }
func (m *ProxyEnv) String() string { return proto.CompactTextString(m) }
func (*ProxyEnv) ProtoMessage() {}
func (*ProxyEnv) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{15} }
func (*ProxyEnv) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{16} }
func (m *ProxyEnv) GetHttpProxy() string {
if m != nil {
@ -868,7 +934,7 @@ type WorkerConstraints struct {
func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} }
func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) }
func (*WorkerConstraints) ProtoMessage() {}
func (*WorkerConstraints) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{16} }
func (*WorkerConstraints) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{17} }
func (m *WorkerConstraints) GetFilter() []string {
if m != nil {
@ -889,7 +955,7 @@ type Definition struct {
func (m *Definition) Reset() { *m = Definition{} }
func (m *Definition) String() string { return proto.CompactTextString(m) }
func (*Definition) ProtoMessage() {}
func (*Definition) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{17} }
func (*Definition) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{18} }
func (m *Definition) GetDef() [][]byte {
if m != nil {
@ -913,7 +979,7 @@ type HostIP struct {
func (m *HostIP) Reset() { *m = HostIP{} }
func (m *HostIP) String() string { return proto.CompactTextString(m) }
func (*HostIP) ProtoMessage() {}
func (*HostIP) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{18} }
func (*HostIP) Descriptor() ([]byte, []int) { return fileDescriptorOps, []int{19} }
func (m *HostIP) GetHost() string {
if m != nil {
@ -938,6 +1004,7 @@ func init() {
proto.RegisterType((*Mount)(nil), "pb.Mount")
proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt")
proto.RegisterType((*SecretOpt)(nil), "pb.SecretOpt")
proto.RegisterType((*SSHOpt)(nil), "pb.SSHOpt")
proto.RegisterType((*CopyOp)(nil), "pb.CopyOp")
proto.RegisterType((*CopySource)(nil), "pb.CopySource")
proto.RegisterType((*SourceOp)(nil), "pb.SourceOp")
@ -1355,6 +1422,18 @@ func (m *Mount) MarshalTo(dAtA []byte) (int, error) {
}
i += n11
}
if m.SSHOpt != nil {
dAtA[i] = 0xb2
i++
dAtA[i] = 0x1
i++
i = encodeVarintOps(dAtA, i, uint64(m.SSHOpt.Size()))
n12, err := m.SSHOpt.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n12
}
return i, nil
}
@ -1436,6 +1515,55 @@ func (m *SecretOpt) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}
func (m *SSHOpt) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
n, err := m.MarshalTo(dAtA)
if err != nil {
return nil, err
}
return dAtA[:n], nil
}
func (m *SSHOpt) MarshalTo(dAtA []byte) (int, error) {
var i int
_ = i
var l int
_ = l
if len(m.ID) > 0 {
dAtA[i] = 0xa
i++
i = encodeVarintOps(dAtA, i, uint64(len(m.ID)))
i += copy(dAtA[i:], m.ID)
}
if m.Uid != 0 {
dAtA[i] = 0x10
i++
i = encodeVarintOps(dAtA, i, uint64(m.Uid))
}
if m.Gid != 0 {
dAtA[i] = 0x18
i++
i = encodeVarintOps(dAtA, i, uint64(m.Gid))
}
if m.Mode != 0 {
dAtA[i] = 0x20
i++
i = encodeVarintOps(dAtA, i, uint64(m.Mode))
}
if m.Optional {
dAtA[i] = 0x28
i++
if m.Optional {
dAtA[i] = 1
} else {
dAtA[i] = 0
}
i++
}
return i, nil
}
func (m *CopyOp) Marshal() (dAtA []byte, err error) {
size := m.Size()
dAtA = make([]byte, size)
@ -1523,10 +1651,15 @@ func (m *SourceOp) MarshalTo(dAtA []byte) (int, error) {
i += copy(dAtA[i:], m.Identifier)
}
if len(m.Attrs) > 0 {
keysForAttrs := make([]string, 0, len(m.Attrs))
for k, _ := range m.Attrs {
keysForAttrs = append(keysForAttrs, string(k))
}
sortkeys.Strings(keysForAttrs)
for _, k := range keysForAttrs {
dAtA[i] = 0x12
i++
v := m.Attrs[k]
v := m.Attrs[string(k)]
mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v)))
i = encodeVarintOps(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
@ -1563,10 +1696,15 @@ func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) {
i = encodeVarintOps(dAtA, i, uint64(m.Builder))
}
if len(m.Inputs) > 0 {
keysForInputs := make([]string, 0, len(m.Inputs))
for k, _ := range m.Inputs {
keysForInputs = append(keysForInputs, string(k))
}
sortkeys.Strings(keysForInputs)
for _, k := range keysForInputs {
dAtA[i] = 0x12
i++
v := m.Inputs[k]
v := m.Inputs[string(k)]
msgSize := 0
if v != nil {
msgSize = v.Size()
@ -1582,11 +1720,11 @@ func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintOps(dAtA, i, uint64(v.Size()))
n12, err := v.MarshalTo(dAtA[i:])
n13, err := v.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n12
i += n13
}
}
}
@ -1594,17 +1732,22 @@ func (m *BuildOp) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x1a
i++
i = encodeVarintOps(dAtA, i, uint64(m.Def.Size()))
n13, err := m.Def.MarshalTo(dAtA[i:])
n14, err := m.Def.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n13
i += n14
}
if len(m.Attrs) > 0 {
keysForAttrs := make([]string, 0, len(m.Attrs))
for k, _ := range m.Attrs {
keysForAttrs = append(keysForAttrs, string(k))
}
sortkeys.Strings(keysForAttrs)
for _, k := range keysForAttrs {
dAtA[i] = 0x22
i++
v := m.Attrs[k]
v := m.Attrs[string(k)]
mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v)))
i = encodeVarintOps(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
@ -1669,10 +1812,15 @@ func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) {
i++
}
if len(m.Description) > 0 {
keysForDescription := make([]string, 0, len(m.Description))
for k, _ := range m.Description {
keysForDescription = append(keysForDescription, string(k))
}
sortkeys.Strings(keysForDescription)
for _, k := range keysForDescription {
dAtA[i] = 0x12
i++
v := m.Description[k]
v := m.Description[string(k)]
mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + len(v) + sovOps(uint64(len(v)))
i = encodeVarintOps(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
@ -1689,17 +1837,22 @@ func (m *OpMetadata) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x22
i++
i = encodeVarintOps(dAtA, i, uint64(m.ExportCache.Size()))
n14, err := m.ExportCache.MarshalTo(dAtA[i:])
n15, err := m.ExportCache.MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n14
i += n15
}
if len(m.Caps) > 0 {
keysForCaps := make([]string, 0, len(m.Caps))
for k, _ := range m.Caps {
keysForCaps = append(keysForCaps, string(k))
}
sortkeys.Strings(keysForCaps)
for _, k := range keysForCaps {
dAtA[i] = 0x2a
i++
v := m.Caps[k]
v := m.Caps[github_com_moby_buildkit_util_apicaps.CapID(k)]
mapSize := 1 + len(k) + sovOps(uint64(len(k))) + 1 + 1
i = encodeVarintOps(dAtA, i, uint64(mapSize))
dAtA[i] = 0xa
@ -1846,10 +1999,15 @@ func (m *Definition) MarshalTo(dAtA []byte) (int, error) {
}
}
if len(m.Metadata) > 0 {
keysForMetadata := make([]string, 0, len(m.Metadata))
for k, _ := range m.Metadata {
keysForMetadata = append(keysForMetadata, string(k))
}
sortkeys.Strings(keysForMetadata)
for _, k := range keysForMetadata {
dAtA[i] = 0x12
i++
v := m.Metadata[k]
v := m.Metadata[github_com_opencontainers_go_digest.Digest(k)]
msgSize := 0
if (&v) != nil {
msgSize = (&v).Size()
@ -1864,11 +2022,11 @@ func (m *Definition) MarshalTo(dAtA []byte) (int, error) {
dAtA[i] = 0x12
i++
i = encodeVarintOps(dAtA, i, uint64((&v).Size()))
n15, err := (&v).MarshalTo(dAtA[i:])
n16, err := (&v).MarshalTo(dAtA[i:])
if err != nil {
return 0, err
}
i += n15
i += n16
}
}
return i, nil
@ -2099,6 +2257,10 @@ func (m *Mount) Size() (n int) {
l = m.SecretOpt.Size()
n += 2 + l + sovOps(uint64(l))
}
if m.SSHOpt != nil {
l = m.SSHOpt.Size()
n += 2 + l + sovOps(uint64(l))
}
return n
}
@ -2137,6 +2299,28 @@ func (m *SecretOpt) Size() (n int) {
return n
}
func (m *SSHOpt) Size() (n int) {
var l int
_ = l
l = len(m.ID)
if l > 0 {
n += 1 + l + sovOps(uint64(l))
}
if m.Uid != 0 {
n += 1 + sovOps(uint64(m.Uid))
}
if m.Gid != 0 {
n += 1 + sovOps(uint64(m.Gid))
}
if m.Mode != 0 {
n += 1 + sovOps(uint64(m.Mode))
}
if m.Optional {
n += 2
}
return n
}
func (m *CopyOp) Size() (n int) {
var l int
_ = l
@ -3508,6 +3692,39 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
return err
}
iNdEx = postIndex
case 22:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field SSHOpt", wireType)
}
var msglen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
msglen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if msglen < 0 {
return ErrInvalidLengthOps
}
postIndex := iNdEx + msglen
if postIndex > l {
return io.ErrUnexpectedEOF
}
if m.SSHOpt == nil {
m.SSHOpt = &SSHOpt{}
}
if err := m.SSHOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {
return err
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipOps(dAtA[iNdEx:])
@ -3783,6 +4000,162 @@ func (m *SecretOpt) Unmarshal(dAtA []byte) error {
}
return nil
}
func (m *SSHOpt) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: SSHOpt: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: SSHOpt: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthOps
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.ID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Uid", wireType)
}
m.Uid = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Uid |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Gid", wireType)
}
m.Gid = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Gid |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 4:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Mode", wireType)
}
m.Mode = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.Mode |= (uint32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 5:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType)
}
var v int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
v |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
m.Optional = bool(v != 0)
default:
iNdEx = preIndex
skippy, err := skipOps(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthOps
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func (m *CopyOp) Unmarshal(dAtA []byte) error {
l := len(dAtA)
iNdEx := 0
@ -5663,94 +6036,96 @@ var (
func init() { proto.RegisterFile("ops.proto", fileDescriptorOps) }
var fileDescriptorOps = []byte{
// 1415 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x57, 0xcd, 0x6f, 0x1b, 0x45,
0x14, 0x8f, 0xd7, 0x9f, 0xfb, 0x9c, 0xa4, 0x66, 0xfa, 0x81, 0x09, 0x25, 0x09, 0x5b, 0x40, 0x69,
0xd2, 0x38, 0x92, 0x2b, 0xb5, 0x15, 0x87, 0x8a, 0xf8, 0xa3, 0x8a, 0x29, 0x89, 0xa3, 0x71, 0x08,
0xc7, 0x6a, 0xb3, 0x1e, 0x3b, 0xab, 0x38, 0x3b, 0xab, 0xdd, 0x71, 0x1b, 0x5f, 0x38, 0xf4, 0x2f,
0x40, 0x42, 0xe2, 0xce, 0x91, 0x0b, 0xff, 0x01, 0xf7, 0x1e, 0xb9, 0xc2, 0xa1, 0xa0, 0xf2, 0x8f,
0xa0, 0xf7, 0x66, 0xd6, 0xbb, 0xfd, 0x40, 0xb4, 0x82, 0x93, 0xdf, 0xbc, 0xf7, 0x9b, 0xdf, 0xcc,
0xfc, 0xde, 0x9b, 0x79, 0x6b, 0xb0, 0x65, 0x18, 0x37, 0xc2, 0x48, 0x2a, 0xc9, 0xac, 0xf0, 0x64,
0x65, 0x7b, 0xec, 0xab, 0xd3, 0xe9, 0x49, 0xc3, 0x93, 0xe7, 0x3b, 0x63, 0x39, 0x96, 0x3b, 0x14,
0x3a, 0x99, 0x8e, 0x68, 0x44, 0x03, 0xb2, 0xf4, 0x14, 0xe7, 0x47, 0x0b, 0xac, 0x7e, 0xc8, 0x3e,
0x86, 0x92, 0x1f, 0x84, 0x53, 0x15, 0xd7, 0x73, 0xeb, 0xf9, 0x8d, 0x6a, 0xd3, 0x6e, 0x84, 0x27,
0x8d, 0x1e, 0x7a, 0xb8, 0x09, 0xb0, 0x75, 0x28, 0x88, 0x0b, 0xe1, 0xd5, 0xad, 0xf5, 0xdc, 0x46,
0xb5, 0x09, 0x08, 0xe8, 0x5e, 0x08, 0xaf, 0x1f, 0xee, 0x2d, 0x70, 0x8a, 0xb0, 0xcf, 0xa0, 0x14,
0xcb, 0x69, 0xe4, 0x89, 0x7a, 0x9e, 0x30, 0x8b, 0x88, 0x19, 0x90, 0x87, 0x50, 0x26, 0x8a, 0x4c,
0x9e, 0x0c, 0x67, 0xf5, 0x42, 0xca, 0xd4, 0x96, 0xe1, 0x4c, 0x33, 0x61, 0x84, 0xdd, 0x80, 0xe2,
0xc9, 0xd4, 0x9f, 0x0c, 0xeb, 0x45, 0x82, 0x54, 0x11, 0xd2, 0x42, 0x07, 0x61, 0x74, 0x8c, 0x6d,
0x40, 0x25, 0x9c, 0xb8, 0x6a, 0x24, 0xa3, 0xf3, 0x3a, 0xa4, 0x0b, 0x1e, 0x1a, 0x1f, 0x9f, 0x47,
0xd9, 0x5d, 0xa8, 0x7a, 0x32, 0x88, 0x55, 0xe4, 0xfa, 0x81, 0x8a, 0xeb, 0x55, 0x02, 0x5f, 0x45,
0xf0, 0x37, 0x32, 0x3a, 0x13, 0x51, 0x3b, 0x0d, 0xf2, 0x2c, 0xb2, 0x55, 0x00, 0x4b, 0x86, 0xce,
0x0f, 0x39, 0xa8, 0x24, 0xac, 0xcc, 0x81, 0xc5, 0xdd, 0xc8, 0x3b, 0xf5, 0x95, 0xf0, 0xd4, 0x34,
0x12, 0xf5, 0xdc, 0x7a, 0x6e, 0xc3, 0xe6, 0x2f, 0xf9, 0xd8, 0x32, 0x58, 0xfd, 0x01, 0x09, 0x65,
0x73, 0xab, 0x3f, 0x60, 0x75, 0x28, 0x1f, 0xbb, 0x91, 0xef, 0x06, 0x8a, 0x94, 0xb1, 0x79, 0x32,
0x64, 0xd7, 0xc1, 0xee, 0x0f, 0x8e, 0x45, 0x14, 0xfb, 0x32, 0x20, 0x3d, 0x6c, 0x9e, 0x3a, 0xd8,
0x2a, 0x40, 0x7f, 0xf0, 0x40, 0xb8, 0x48, 0x1a, 0xd7, 0x8b, 0xeb, 0xf9, 0x0d, 0x9b, 0x67, 0x3c,
0xce, 0xb7, 0x50, 0xa4, 0x1c, 0xb1, 0x2f, 0xa1, 0x34, 0xf4, 0xc7, 0x22, 0x56, 0x7a, 0x3b, 0xad,
0xe6, 0xb3, 0xe7, 0x6b, 0x0b, 0xbf, 0x3f, 0x5f, 0xdb, 0xcc, 0x14, 0x83, 0x0c, 0x45, 0xe0, 0xc9,
0x40, 0xb9, 0x7e, 0x20, 0xa2, 0x78, 0x67, 0x2c, 0xb7, 0xf5, 0x94, 0x46, 0x87, 0x7e, 0xb8, 0x61,
0x60, 0x37, 0xa1, 0xe8, 0x07, 0x43, 0x71, 0x41, 0xfb, 0xcf, 0xb7, 0x2e, 0x1b, 0xaa, 0x6a, 0x7f,
0xaa, 0xc2, 0xa9, 0xea, 0x61, 0x88, 0x6b, 0x84, 0x13, 0x42, 0x49, 0x97, 0x00, 0xbb, 0x0e, 0x85,
0x73, 0xa1, 0x5c, 0x5a, 0xbe, 0xda, 0xac, 0xa0, 0xb4, 0xfb, 0x42, 0xb9, 0x9c, 0xbc, 0x58, 0x5d,
0xe7, 0x72, 0x8a, 0xd2, 0x5b, 0x69, 0x75, 0xed, 0xa3, 0x87, 0x9b, 0x00, 0xfb, 0x14, 0xca, 0x81,
0x50, 0x4f, 0x64, 0x74, 0x46, 0x12, 0x2d, 0xeb, 0x9c, 0x1f, 0x08, 0xb5, 0x2f, 0x87, 0x82, 0x27,
0x31, 0xe7, 0xa7, 0x1c, 0x14, 0x90, 0x98, 0x31, 0x28, 0xb8, 0xd1, 0x58, 0x97, 0xab, 0xcd, 0xc9,
0x66, 0x35, 0xc8, 0x8b, 0xe0, 0x31, 0xad, 0x61, 0x73, 0x34, 0xd1, 0xe3, 0x3d, 0x19, 0x1a, 0xd1,
0xd1, 0xc4, 0x79, 0xd3, 0x58, 0x44, 0x46, 0x6b, 0xb2, 0xd9, 0x4d, 0xb0, 0xc3, 0x48, 0x5e, 0xcc,
0x1e, 0xe1, 0xec, 0x62, 0xa6, 0x92, 0xd0, 0xd9, 0x0d, 0x1e, 0xf3, 0x4a, 0x68, 0x2c, 0xb6, 0x09,
0x20, 0x2e, 0x54, 0xe4, 0xee, 0xc9, 0x58, 0xc5, 0xf5, 0x12, 0x9d, 0x86, 0x0a, 0x18, 0x1d, 0xbd,
0x43, 0x9e, 0x89, 0x3a, 0x3f, 0x5b, 0x50, 0xa4, 0x43, 0xb2, 0x0d, 0x94, 0x34, 0x9c, 0xea, 0xec,
0xe4, 0x5b, 0xcc, 0x48, 0x0a, 0x94, 0xbc, 0xb9, 0xa2, 0x98, 0xc8, 0x15, 0xa8, 0xc4, 0x62, 0x22,
0x3c, 0x25, 0x23, 0x53, 0x3f, 0xf3, 0x31, 0x6e, 0x7d, 0x88, 0x29, 0xd6, 0xa7, 0x21, 0x9b, 0x6d,
0x41, 0x49, 0x52, 0x5e, 0xe8, 0x40, 0xff, 0x90, 0x2d, 0x03, 0x41, 0xf2, 0x48, 0xb8, 0x43, 0x19,
0x4c, 0x66, 0x74, 0xcc, 0x0a, 0x9f, 0x8f, 0xd9, 0x16, 0xd8, 0x94, 0x89, 0xa3, 0x59, 0x28, 0xea,
0x25, 0xca, 0xc0, 0xd2, 0x3c, 0x4b, 0xe8, 0xe4, 0x69, 0x1c, 0x6f, 0x9e, 0xe7, 0x7a, 0xa7, 0xa2,
0x1f, 0xaa, 0xfa, 0x95, 0x54, 0xaf, 0xb6, 0xf1, 0xf1, 0x79, 0x14, 0x69, 0x63, 0xe1, 0x45, 0x42,
0x21, 0xf4, 0x2a, 0x41, 0x89, 0x76, 0x90, 0x38, 0x79, 0x1a, 0x77, 0x7a, 0x50, 0x49, 0x28, 0xf0,
0x0a, 0xf5, 0x3a, 0xe6, 0x72, 0x59, 0xbd, 0x0e, 0xdb, 0x86, 0x72, 0x7c, 0xea, 0x46, 0x7e, 0x30,
0x26, 0x5d, 0x96, 0x9b, 0x97, 0xe7, 0x2b, 0x0e, 0xb4, 0x1f, 0xc9, 0x12, 0x8c, 0x23, 0xc1, 0x9e,
0x2f, 0xf1, 0x1a, 0x57, 0x0d, 0xf2, 0x53, 0x7f, 0x48, 0x3c, 0x4b, 0x1c, 0x4d, 0xf4, 0x8c, 0x7d,
0x5d, 0x27, 0x4b, 0x1c, 0x4d, 0x14, 0xfb, 0x5c, 0x0e, 0x05, 0xc9, 0xba, 0xc4, 0xc9, 0x46, 0xfd,
0x64, 0xa8, 0x7c, 0x19, 0xb8, 0x93, 0x44, 0xbf, 0x64, 0xec, 0xdc, 0x87, 0x92, 0x7e, 0xc3, 0xd8,
0x3a, 0xe4, 0xe3, 0xc8, 0x33, 0xef, 0xe8, 0x72, 0xf2, 0xb8, 0xe9, 0x67, 0x90, 0x63, 0x68, 0x9e,
0x48, 0x2b, 0x4d, 0xa4, 0xc3, 0x01, 0x52, 0xd8, 0xff, 0x53, 0x30, 0xce, 0xf7, 0x39, 0xa8, 0x24,
0xcf, 0x2f, 0xbe, 0x25, 0xfe, 0x50, 0x04, 0xca, 0x1f, 0xf9, 0x22, 0x32, 0x62, 0x64, 0x3c, 0x6c,
0x1b, 0x8a, 0xae, 0x52, 0x51, 0x72, 0x45, 0xdf, 0xcf, 0xbe, 0xdd, 0x8d, 0x5d, 0x8c, 0x74, 0x03,
0x15, 0xcd, 0xb8, 0x46, 0xad, 0xdc, 0x03, 0x48, 0x9d, 0xa8, 0xdf, 0x99, 0x98, 0x19, 0x56, 0x34,
0xd9, 0x15, 0x28, 0x3e, 0x76, 0x27, 0x53, 0x61, 0x36, 0xa5, 0x07, 0x9f, 0x5b, 0xf7, 0x72, 0xce,
0x2f, 0x16, 0x94, 0xcd, 0x5b, 0xce, 0x6e, 0x41, 0x99, 0xde, 0x72, 0xb3, 0xa3, 0x37, 0x9f, 0x34,
0x81, 0xb0, 0x9d, 0x79, 0x93, 0xca, 0xec, 0xd1, 0x50, 0xe9, 0x66, 0x65, 0xf6, 0x98, 0xb6, 0xac,
0xfc, 0x50, 0x8c, 0x4c, 0x37, 0xa2, 0x54, 0x74, 0xc4, 0xc8, 0x0f, 0x7c, 0xcc, 0x19, 0xc7, 0x10,
0xbb, 0x95, 0x9c, 0xba, 0x40, 0x8c, 0xd7, 0xb2, 0x8c, 0xaf, 0x1f, 0xba, 0x07, 0xd5, 0xcc, 0x32,
0x6f, 0x38, 0xf5, 0x27, 0xd9, 0x53, 0x9b, 0x25, 0x89, 0x4e, 0xb7, 0xd2, 0x54, 0x85, 0xff, 0xa0,
0xdf, 0x1d, 0x80, 0x94, 0xf2, 0xed, 0x2b, 0xc5, 0x79, 0x9a, 0x07, 0xe8, 0x87, 0xf8, 0x78, 0x0e,
0x5d, 0x7a, 0x93, 0x17, 0xfd, 0x71, 0x20, 0x23, 0xf1, 0x88, 0x2e, 0x2b, 0xcd, 0xaf, 0xf0, 0xaa,
0xf6, 0xd1, 0xbd, 0x62, 0xbb, 0x50, 0x1d, 0x8a, 0xd8, 0x8b, 0x7c, 0x2a, 0x72, 0x23, 0xfa, 0x1a,
0x9e, 0x29, 0xe5, 0x69, 0x74, 0x52, 0x84, 0xd6, 0x2a, 0x3b, 0x87, 0x35, 0x61, 0x51, 0x5c, 0x84,
0x32, 0x52, 0x66, 0x15, 0xdd, 0xf2, 0x2f, 0xe9, 0x8f, 0x07, 0xf4, 0xd3, 0x4a, 0xbc, 0x2a, 0xd2,
0x01, 0x73, 0xa1, 0xe0, 0xb9, 0xa1, 0xee, 0x77, 0xd5, 0x66, 0xfd, 0x95, 0xf5, 0xda, 0x6e, 0xa8,
0x45, 0x6b, 0xdd, 0xc6, 0xb3, 0x3e, 0xfd, 0x63, 0x6d, 0x2b, 0xd3, 0xe4, 0xce, 0xe5, 0xc9, 0x6c,
0x87, 0xea, 0xe5, 0xcc, 0x57, 0x3b, 0x53, 0xe5, 0x4f, 0x76, 0xdc, 0xd0, 0x47, 0x3a, 0x9c, 0xd8,
0xeb, 0x70, 0xa2, 0x5e, 0xb9, 0x0f, 0xb5, 0x57, 0xf7, 0xfd, 0x2e, 0x39, 0x58, 0xb9, 0x0b, 0xf6,
0x7c, 0x1f, 0xff, 0x36, 0xb1, 0x92, 0x4d, 0xde, 0x0d, 0xa8, 0x66, 0xce, 0x8d, 0xc0, 0x63, 0x02,
0x6a, 0xf5, 0xf5, 0xc0, 0x79, 0x8a, 0xdf, 0x1b, 0x49, 0xc7, 0xf9, 0x08, 0xe0, 0x54, 0xa9, 0xf0,
0x11, 0xb5, 0x20, 0xb3, 0x88, 0x8d, 0x1e, 0x42, 0xb0, 0x35, 0xa8, 0xe2, 0x20, 0x36, 0x71, 0xbd,
0x53, 0x9a, 0x11, 0x6b, 0xc0, 0x87, 0x60, 0x8f, 0xe6, 0xd3, 0x75, 0xeb, 0xa8, 0x8c, 0x92, 0xd9,
0x1f, 0x40, 0x25, 0x90, 0x26, 0xa6, 0x3b, 0x62, 0x39, 0x90, 0x14, 0x72, 0xb6, 0xe0, 0xbd, 0xd7,
0x3e, 0x8e, 0xd8, 0x35, 0x28, 0x8d, 0xfc, 0x89, 0xa2, 0xeb, 0x8a, 0x4d, 0xd6, 0x8c, 0x9c, 0xdf,
0x72, 0x00, 0xe9, 0xd5, 0x42, 0x45, 0xf0, 0xde, 0x21, 0x66, 0x51, 0xdf, 0xb3, 0x09, 0x54, 0xce,
0x4d, 0x06, 0x4d, 0x1d, 0x5d, 0x7f, 0xf9, 0x3a, 0x36, 0x92, 0x04, 0xeb, 0xdc, 0x36, 0x4d, 0x6e,
0xdf, 0xe5, 0x03, 0x66, 0xbe, 0xc2, 0xca, 0x43, 0x58, 0x7a, 0x89, 0xee, 0x2d, 0x6f, 0x6a, 0x5a,
0x65, 0xd9, 0x94, 0xdd, 0x82, 0x92, 0x6e, 0xee, 0xf8, 0x6e, 0xa3, 0x65, 0x68, 0xc8, 0xa6, 0xde,
0x72, 0x98, 0x7c, 0xea, 0xf5, 0x0e, 0x37, 0x37, 0xa0, 0x6c, 0x3e, 0x5a, 0x98, 0x0d, 0xc5, 0xaf,
0x0f, 0x06, 0xdd, 0xa3, 0xda, 0x02, 0xab, 0x40, 0x61, 0xaf, 0x3f, 0x38, 0xaa, 0xe5, 0xd0, 0x3a,
0xe8, 0x1f, 0x74, 0x6b, 0xd6, 0xe6, 0x17, 0x60, 0xcf, 0x9b, 0x2b, 0xba, 0x5b, 0xbd, 0x83, 0x4e,
0x6d, 0x81, 0x01, 0x94, 0x06, 0xdd, 0x36, 0xef, 0x22, 0xb8, 0x0c, 0xf9, 0xc1, 0x60, 0xaf, 0x66,
0x21, 0x55, 0x7b, 0xb7, 0xbd, 0xd7, 0xad, 0xe5, 0xd1, 0x3c, 0xda, 0x3f, 0x7c, 0x30, 0xa8, 0x15,
0x36, 0xef, 0xc0, 0xa5, 0x57, 0x1a, 0x20, 0xcd, 0xde, 0xdb, 0xe5, 0x5d, 0x64, 0xaa, 0x42, 0xf9,
0x90, 0xf7, 0x8e, 0x77, 0x8f, 0xba, 0xb5, 0x1c, 0x06, 0xbe, 0xea, 0xb7, 0x1f, 0x76, 0x3b, 0x35,
0xab, 0x55, 0x7b, 0xf6, 0x62, 0x35, 0xf7, 0xeb, 0x8b, 0xd5, 0xdc, 0x9f, 0x2f, 0x56, 0x73, 0xdf,
0xfd, 0xb5, 0xba, 0x70, 0x52, 0xa2, 0x3f, 0x03, 0xb7, 0xff, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x8c,
0x1e, 0x1e, 0x98, 0x4c, 0x0c, 0x00, 0x00,
// 1444 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x57, 0x4b, 0x6f, 0x1b, 0x47,
0x12, 0x16, 0x87, 0xcf, 0x29, 0x4a, 0x32, 0xb7, 0xfd, 0x58, 0xae, 0xd6, 0x2b, 0x69, 0xc7, 0xbb,
0x81, 0x2c, 0x59, 0x14, 0x40, 0x03, 0xb6, 0x91, 0x83, 0x11, 0xf1, 0x61, 0x88, 0x71, 0x24, 0x0a,
0x4d, 0x45, 0x39, 0x1a, 0xa3, 0x61, 0x93, 0x1a, 0x88, 0x9a, 0x1e, 0xcc, 0x34, 0x6d, 0xf1, 0x92,
0x83, 0x7f, 0x41, 0x80, 0x00, 0xb9, 0xe7, 0x98, 0x1f, 0x91, 0xbb, 0x8f, 0x41, 0x4e, 0x49, 0x0e,
0x4e, 0xa0, 0xfc, 0x91, 0xa0, 0xaa, 0x7b, 0x38, 0xe3, 0x47, 0x10, 0x1b, 0x09, 0x72, 0x62, 0x75,
0xd5, 0xd7, 0x5f, 0xd7, 0xab, 0xbb, 0x86, 0x60, 0xcb, 0x30, 0x6e, 0x84, 0x91, 0x54, 0x92, 0x59,
0xe1, 0xc9, 0xca, 0xf6, 0xd8, 0x57, 0xa7, 0xd3, 0x93, 0x86, 0x27, 0xcf, 0x77, 0xc6, 0x72, 0x2c,
0x77, 0xc8, 0x74, 0x32, 0x1d, 0xd1, 0x8a, 0x16, 0x24, 0xe9, 0x2d, 0xce, 0xd7, 0x16, 0x58, 0xfd,
0x90, 0xfd, 0x17, 0x4a, 0x7e, 0x10, 0x4e, 0x55, 0x5c, 0xcf, 0xad, 0xe7, 0x37, 0xaa, 0x4d, 0xbb,
0x11, 0x9e, 0x34, 0x7a, 0xa8, 0xe1, 0xc6, 0xc0, 0xd6, 0xa1, 0x20, 0x2e, 0x84, 0x57, 0xb7, 0xd6,
0x73, 0x1b, 0xd5, 0x26, 0x20, 0xa0, 0x7b, 0x21, 0xbc, 0x7e, 0xb8, 0xb7, 0xc0, 0xc9, 0xc2, 0x3e,
0x80, 0x52, 0x2c, 0xa7, 0x91, 0x27, 0xea, 0x79, 0xc2, 0x2c, 0x22, 0x66, 0x40, 0x1a, 0x42, 0x19,
0x2b, 0x32, 0x79, 0x32, 0x9c, 0xd5, 0x0b, 0x29, 0x53, 0x5b, 0x86, 0x33, 0xcd, 0x84, 0x16, 0x76,
0x0b, 0x8a, 0x27, 0x53, 0x7f, 0x32, 0xac, 0x17, 0x09, 0x52, 0x45, 0x48, 0x0b, 0x15, 0x84, 0xd1,
0x36, 0xb6, 0x01, 0x95, 0x70, 0xe2, 0xaa, 0x91, 0x8c, 0xce, 0xeb, 0x90, 0x1e, 0x78, 0x68, 0x74,
0x7c, 0x6e, 0x65, 0xf7, 0xa1, 0xea, 0xc9, 0x20, 0x56, 0x91, 0xeb, 0x07, 0x2a, 0xae, 0x57, 0x09,
0x7c, 0x1d, 0xc1, 0x9f, 0xc9, 0xe8, 0x4c, 0x44, 0xed, 0xd4, 0xc8, 0xb3, 0xc8, 0x56, 0x01, 0x2c,
0x19, 0x3a, 0x5f, 0xe5, 0xa0, 0x92, 0xb0, 0x32, 0x07, 0x16, 0x77, 0x23, 0xef, 0xd4, 0x57, 0xc2,
0x53, 0xd3, 0x48, 0xd4, 0x73, 0xeb, 0xb9, 0x0d, 0x9b, 0xbf, 0xa2, 0x63, 0xcb, 0x60, 0xf5, 0x07,
0x94, 0x28, 0x9b, 0x5b, 0xfd, 0x01, 0xab, 0x43, 0xf9, 0xd8, 0x8d, 0x7c, 0x37, 0x50, 0x94, 0x19,
0x9b, 0x27, 0x4b, 0x76, 0x13, 0xec, 0xfe, 0xe0, 0x58, 0x44, 0xb1, 0x2f, 0x03, 0xca, 0x87, 0xcd,
0x53, 0x05, 0x5b, 0x05, 0xe8, 0x0f, 0x1e, 0x09, 0x17, 0x49, 0xe3, 0x7a, 0x71, 0x3d, 0xbf, 0x61,
0xf3, 0x8c, 0xc6, 0xf9, 0x1c, 0x8a, 0x54, 0x23, 0xf6, 0x31, 0x94, 0x86, 0xfe, 0x58, 0xc4, 0x4a,
0xbb, 0xd3, 0x6a, 0xbe, 0x78, 0xb9, 0xb6, 0xf0, 0xd3, 0xcb, 0xb5, 0xcd, 0x4c, 0x33, 0xc8, 0x50,
0x04, 0x9e, 0x0c, 0x94, 0xeb, 0x07, 0x22, 0x8a, 0x77, 0xc6, 0x72, 0x5b, 0x6f, 0x69, 0x74, 0xe8,
0x87, 0x1b, 0x06, 0x76, 0x1b, 0x8a, 0x7e, 0x30, 0x14, 0x17, 0xe4, 0x7f, 0xbe, 0x75, 0xd5, 0x50,
0x55, 0xfb, 0x53, 0x15, 0x4e, 0x55, 0x0f, 0x4d, 0x5c, 0x23, 0x9c, 0x10, 0x4a, 0xba, 0x05, 0xd8,
0x4d, 0x28, 0x9c, 0x0b, 0xe5, 0xd2, 0xf1, 0xd5, 0x66, 0x05, 0x53, 0xbb, 0x2f, 0x94, 0xcb, 0x49,
0x8b, 0xdd, 0x75, 0x2e, 0xa7, 0x98, 0x7a, 0x2b, 0xed, 0xae, 0x7d, 0xd4, 0x70, 0x63, 0x60, 0xff,
0x87, 0x72, 0x20, 0xd4, 0x33, 0x19, 0x9d, 0x51, 0x8a, 0x96, 0x75, 0xcd, 0x0f, 0x84, 0xda, 0x97,
0x43, 0xc1, 0x13, 0x9b, 0xf3, 0x4d, 0x0e, 0x0a, 0x48, 0xcc, 0x18, 0x14, 0xdc, 0x68, 0xac, 0xdb,
0xd5, 0xe6, 0x24, 0xb3, 0x1a, 0xe4, 0x45, 0xf0, 0x94, 0xce, 0xb0, 0x39, 0x8a, 0xa8, 0xf1, 0x9e,
0x0d, 0x4d, 0xd2, 0x51, 0xc4, 0x7d, 0xd3, 0x58, 0x44, 0x26, 0xd7, 0x24, 0xb3, 0xdb, 0x60, 0x87,
0x91, 0xbc, 0x98, 0x3d, 0xc1, 0xdd, 0xc5, 0x4c, 0x27, 0xa1, 0xb2, 0x1b, 0x3c, 0xe5, 0x95, 0xd0,
0x48, 0x6c, 0x13, 0x40, 0x5c, 0xa8, 0xc8, 0xdd, 0x93, 0xb1, 0x8a, 0xeb, 0x25, 0x8a, 0x86, 0x1a,
0x18, 0x15, 0xbd, 0x43, 0x9e, 0xb1, 0x3a, 0xdf, 0x5b, 0x50, 0xa4, 0x20, 0xd9, 0x06, 0xa6, 0x34,
0x9c, 0xea, 0xea, 0xe4, 0x5b, 0xcc, 0xa4, 0x14, 0xa8, 0x78, 0xf3, 0x8c, 0x62, 0x21, 0x57, 0xa0,
0x12, 0x8b, 0x89, 0xf0, 0x94, 0x8c, 0x4c, 0xff, 0xcc, 0xd7, 0xe8, 0xfa, 0x10, 0x4b, 0xac, 0xa3,
0x21, 0x99, 0x6d, 0x41, 0x49, 0x52, 0x5d, 0x28, 0xa0, 0xdf, 0xa9, 0x96, 0x81, 0x20, 0x79, 0x24,
0xdc, 0xa1, 0x0c, 0x26, 0x33, 0x0a, 0xb3, 0xc2, 0xe7, 0x6b, 0xb6, 0x05, 0x36, 0x55, 0xe2, 0x68,
0x16, 0x8a, 0x7a, 0x89, 0x2a, 0xb0, 0x34, 0xaf, 0x12, 0x2a, 0x79, 0x6a, 0xc7, 0x9b, 0xe7, 0xb9,
0xde, 0xa9, 0xe8, 0x87, 0xaa, 0x7e, 0x2d, 0xcd, 0x57, 0xdb, 0xe8, 0xf8, 0xdc, 0x8a, 0xb4, 0xb1,
0xf0, 0x22, 0xa1, 0x10, 0x7a, 0x9d, 0xa0, 0x44, 0x3b, 0x48, 0x94, 0x3c, 0xb5, 0x33, 0x07, 0x4a,
0x83, 0xc1, 0x1e, 0x22, 0x6f, 0xa4, 0x2f, 0x83, 0xd6, 0x70, 0x63, 0x71, 0x7a, 0x50, 0x49, 0x8e,
0xc1, 0x6b, 0xd6, 0xeb, 0x98, 0x0b, 0x68, 0xf5, 0x3a, 0x6c, 0x1b, 0xca, 0xf1, 0xa9, 0x1b, 0xf9,
0xc1, 0x98, 0x72, 0xb7, 0xdc, 0xbc, 0x3a, 0xf7, 0x6a, 0xa0, 0xf5, 0xc8, 0x94, 0x60, 0x1c, 0x09,
0xf6, 0xdc, 0x8d, 0x37, 0xb8, 0x6a, 0x90, 0x9f, 0xfa, 0x43, 0xe2, 0x59, 0xe2, 0x28, 0xa2, 0x66,
0xec, 0xeb, 0x5e, 0x5a, 0xe2, 0x28, 0x62, 0x41, 0xce, 0xe5, 0x50, 0x50, 0xea, 0x97, 0x38, 0xc9,
0x98, 0x63, 0x19, 0x2a, 0x5f, 0x06, 0xee, 0x24, 0xc9, 0x71, 0xb2, 0x76, 0x26, 0x49, 0x7c, 0x7f,
0xcb, 0x69, 0x0f, 0xa1, 0xa4, 0x5f, 0x55, 0xb6, 0x0e, 0xf9, 0x38, 0xf2, 0xcc, 0xcb, 0xbe, 0x9c,
0x3c, 0xb7, 0xfa, 0x61, 0xe6, 0x68, 0x9a, 0xb7, 0x96, 0x95, 0xb6, 0x96, 0xc3, 0x01, 0x52, 0xd8,
0x5f, 0xd3, 0xc2, 0xce, 0x97, 0x39, 0xa8, 0x24, 0x03, 0x01, 0x5f, 0x37, 0x7f, 0x28, 0x02, 0xe5,
0x8f, 0x7c, 0x11, 0x99, 0x64, 0x64, 0x34, 0x6c, 0x1b, 0x8a, 0xae, 0x52, 0x51, 0xf2, 0x68, 0xfc,
0x33, 0x3b, 0x4d, 0x1a, 0xbb, 0x68, 0xe9, 0x06, 0x2a, 0x9a, 0x71, 0x8d, 0x5a, 0x79, 0x00, 0x90,
0x2a, 0x31, 0x7f, 0x67, 0x62, 0x66, 0x58, 0x51, 0x64, 0xd7, 0xa0, 0xf8, 0xd4, 0x9d, 0x4c, 0x85,
0x71, 0x4a, 0x2f, 0x3e, 0xb4, 0x1e, 0xe4, 0x9c, 0x6f, 0x2d, 0x28, 0x9b, 0xe9, 0xc2, 0xee, 0x40,
0x99, 0xa6, 0x8b, 0xf1, 0xe8, 0xed, 0x91, 0x26, 0x10, 0xb6, 0x33, 0x1f, 0x9b, 0x19, 0x1f, 0x0d,
0x95, 0x1e, 0x9f, 0xc6, 0xc7, 0x74, 0x88, 0xe6, 0x87, 0x62, 0x64, 0xe6, 0x23, 0x95, 0xa2, 0x23,
0x46, 0x7e, 0xe0, 0x63, 0xcd, 0x38, 0x9a, 0xd8, 0x9d, 0x24, 0xea, 0x02, 0x31, 0xde, 0xc8, 0x32,
0xbe, 0x19, 0x74, 0x0f, 0xaa, 0x99, 0x63, 0xde, 0x12, 0xf5, 0xff, 0xb2, 0x51, 0x9b, 0x23, 0x89,
0x4e, 0x0f, 0xf7, 0x34, 0x0b, 0x7f, 0x22, 0x7f, 0xf7, 0x00, 0x52, 0xca, 0x77, 0xef, 0x14, 0xe7,
0x79, 0x1e, 0xa0, 0x1f, 0xe2, 0x73, 0x3e, 0x74, 0x69, 0x4a, 0x2c, 0xfa, 0xe3, 0x40, 0x46, 0xe2,
0x09, 0x3d, 0x1f, 0xb4, 0xbf, 0xc2, 0xab, 0x5a, 0x47, 0xb7, 0x98, 0xed, 0x42, 0x75, 0x28, 0x62,
0x2f, 0xf2, 0xa9, 0xc9, 0x4d, 0xd2, 0xd7, 0x30, 0xa6, 0x94, 0xa7, 0xd1, 0x49, 0x11, 0x3a, 0x57,
0xd9, 0x3d, 0xac, 0x09, 0x8b, 0xe2, 0x22, 0x94, 0x91, 0x32, 0xa7, 0xe8, 0x8f, 0x90, 0x2b, 0xfa,
0x73, 0x06, 0xf5, 0x74, 0x12, 0xaf, 0x8a, 0x74, 0xc1, 0x5c, 0x28, 0x78, 0x6e, 0xa8, 0x27, 0x70,
0xb5, 0x59, 0x7f, 0xed, 0xbc, 0xb6, 0x1b, 0xea, 0xa4, 0xb5, 0xee, 0x62, 0xac, 0xcf, 0x7f, 0x5e,
0xdb, 0xca, 0x8c, 0xdd, 0x73, 0x79, 0x32, 0xdb, 0xa1, 0x7e, 0x39, 0xf3, 0xd5, 0xce, 0x54, 0xf9,
0x93, 0x1d, 0x37, 0xf4, 0x91, 0x0e, 0x37, 0xf6, 0x3a, 0x9c, 0xa8, 0x57, 0x1e, 0x42, 0xed, 0x75,
0xbf, 0xdf, 0xa7, 0x06, 0x2b, 0xf7, 0xc1, 0x9e, 0xfb, 0xf1, 0x47, 0x1b, 0x2b, 0xd9, 0xe2, 0xdd,
0x82, 0x6a, 0x26, 0x6e, 0x04, 0x1e, 0x13, 0x50, 0x67, 0x5f, 0x2f, 0x9c, 0xe7, 0xf8, 0x05, 0x94,
0xcc, 0xc0, 0xff, 0x00, 0x9c, 0x2a, 0x15, 0x3e, 0xa1, 0xa1, 0x68, 0x0e, 0xb1, 0x51, 0x43, 0x08,
0xb6, 0x06, 0x55, 0x5c, 0xc4, 0xc6, 0xae, 0x3d, 0xa5, 0x1d, 0xb1, 0x06, 0xfc, 0x1b, 0xec, 0xd1,
0x7c, 0xbb, 0x1e, 0x66, 0x95, 0x51, 0xb2, 0xfb, 0x5f, 0x50, 0x09, 0xa4, 0xb1, 0xe9, 0x19, 0x5d,
0x0e, 0x24, 0x99, 0x9c, 0x2d, 0xf8, 0xc7, 0x1b, 0x9f, 0x6b, 0xec, 0x06, 0x94, 0x46, 0xfe, 0x44,
0xd1, 0x75, 0xc5, 0xb1, 0x6f, 0x56, 0xce, 0x8f, 0x39, 0x80, 0xf4, 0x6a, 0x61, 0x46, 0xf0, 0xde,
0x21, 0x66, 0x51, 0xdf, 0xb3, 0x09, 0x54, 0xce, 0x4d, 0x05, 0x4d, 0x1f, 0xdd, 0x7c, 0xf5, 0x3a,
0x36, 0x92, 0x02, 0xeb, 0xda, 0x36, 0x4d, 0x6d, 0xdf, 0xe7, 0x93, 0x6a, 0x7e, 0xc2, 0xca, 0x63,
0x58, 0x7a, 0x85, 0xee, 0x1d, 0x6f, 0x6a, 0xda, 0x65, 0xd9, 0x92, 0xdd, 0x81, 0x92, 0xfe, 0xdc,
0xc0, 0x77, 0x1b, 0x25, 0x43, 0x43, 0x32, 0xcd, 0x96, 0xc3, 0xe4, 0xe3, 0xb3, 0x77, 0xb8, 0xb9,
0x01, 0x65, 0xf3, 0x19, 0xc5, 0x6c, 0x28, 0x7e, 0x7a, 0x30, 0xe8, 0x1e, 0xd5, 0x16, 0x58, 0x05,
0x0a, 0x7b, 0xfd, 0xc1, 0x51, 0x2d, 0x87, 0xd2, 0x41, 0xff, 0xa0, 0x5b, 0xb3, 0x36, 0x3f, 0x02,
0x7b, 0x3e, 0xee, 0x51, 0xdd, 0xea, 0x1d, 0x74, 0x6a, 0x0b, 0x0c, 0xa0, 0x34, 0xe8, 0xb6, 0x79,
0x17, 0xc1, 0x65, 0xc8, 0x0f, 0x06, 0x7b, 0x35, 0x0b, 0xa9, 0xda, 0xbb, 0xed, 0xbd, 0x6e, 0x2d,
0x8f, 0xe2, 0xd1, 0xfe, 0xe1, 0xa3, 0x41, 0xad, 0xb0, 0x79, 0x0f, 0xae, 0xbc, 0x36, 0x6e, 0x69,
0xf7, 0xde, 0x2e, 0xef, 0x22, 0x53, 0x15, 0xca, 0x87, 0xbc, 0x77, 0xbc, 0x7b, 0xd4, 0xad, 0xe5,
0xd0, 0xf0, 0x49, 0xbf, 0xfd, 0xb8, 0xdb, 0xa9, 0x59, 0xad, 0x6b, 0x2f, 0x2e, 0x57, 0x73, 0xdf,
0x5d, 0xae, 0xe6, 0x7e, 0xb8, 0x5c, 0xcd, 0xfd, 0x72, 0xb9, 0x9a, 0xfb, 0xe2, 0xd7, 0xd5, 0x85,
0x93, 0x12, 0xfd, 0x45, 0xb9, 0xfb, 0x5b, 0x00, 0x00, 0x00, 0xff, 0xff, 0x84, 0xfe, 0x08, 0x0c,
0xe2, 0x0c, 0x00, 0x00,
}

View File

@ -6,6 +6,8 @@ package pb;
import "github.com/gogo/protobuf/gogoproto/gogo.proto";
option (gogoproto.stable_marshaler_all) = true;
// Op represents a vertex of the LLB DAG.
message Op {
// inputs is a set of input edges.
@ -72,6 +74,7 @@ message Mount {
MountType mountType = 6;
CacheOpt cacheOpt = 20;
SecretOpt secretOpt = 21;
SSHOpt SSHOpt = 22;
}
// MountType defines a type of a mount from a supported set
@ -116,6 +119,21 @@ message SecretOpt {
bool optional = 5;
}
// SSHOpt defines options describing ssh agent socket mounts
message SSHOpt {
// ID of exposed ssh rule. Used for querying the value.
string ID = 1;
// UID of agent socket
uint32 uid = 2;
// GID of agent socket
uint32 gid = 3;
// Mode is the filesystem mode of agent socket
uint32 mode = 4;
// Optional defines whether the ssh socket is optional rather than required.
// If it is not optional, an error is produced when the client does not expose ssh.
bool optional = 5;
}
// CopyOp copies files across Ops.
message CopyOp {
repeated CopySource src = 1;
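
A minimal sketch, not part of this diff, of how the new field 22 maps onto the generated Go types from the earlier file. The Dest field comes from the full Mount definition, which is not visible in this hunk, and both the destination path and the "default" ID are illustrative only; an empty ID falls back to the default agent on the provider side.

// Sketch only: Dest and the concrete ID/mode values are illustrative.
package example

import "github.com/moby/buildkit/solver/pb"

func sshMount(dest string) *pb.Mount {
	return &pb.Mount{
		Dest: dest,
		SSHOpt: &pb.SSHOpt{
			ID:       "default",
			Mode:     0600,
			Optional: true,
		},
	}
}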

View File

@ -1,12 +1,12 @@
github.com/boltdb/bolt e9cf4fae01b5a8ff89d0ec6b32f0d9c9f79aefdd
github.com/pkg/errors v0.8.0
go.etcd.io/bbolt v1.3.1-etcd.8
github.com/stretchr/testify v1.1.4
github.com/davecgh/go-spew v1.1.0
github.com/pmezard/go-difflib v1.0.0
golang.org/x/sys 1b2967e3c290b7c545b3db0deeda16e9be4f98a2
github.com/containerd/containerd v1.2.0-beta.2
github.com/containerd/containerd d97a907f7f781c0ab8340877d8e6b53cc7f1c2f6
github.com/containerd/typeurl a93fcdb778cd272c6e9b3028b2f42d813e785d40
golang.org/x/sync 450f422ab23cf9881c94e2db30cac0eb1b7cf80c
github.com/sirupsen/logrus v1.0.0
@ -16,19 +16,20 @@ golang.org/x/net 0ed95abb35c445290478a5348a7b38bb154135fd
github.com/gogo/protobuf v1.0.0
github.com/gogo/googleapis b23578765ee54ff6bceff57f397d833bf4ca6869
github.com/golang/protobuf v1.1.0
github.com/containerd/continuity d3c23511c1bf5851696cba83143d9cbcd666869b
github.com/containerd/continuity f44b615e492bdfb371aae2f76ec694d9da1db537
github.com/opencontainers/image-spec v1.0.1
github.com/opencontainers/runc 20aff4f0488c6d4b8df4d85b4f63f1f704c11abd
github.com/Microsoft/go-winio v0.4.10
github.com/Microsoft/go-winio v0.4.11
github.com/containerd/fifo 3d5202aec260678c48179c56f40e6f38a095738c
github.com/opencontainers/runtime-spec d810dbc60d8c5aeeb3d054bd1132fab2121968ce # v1.0.1-43-gd810dbc
github.com/containerd/go-runc acb7c88cac264acca9b5eae187a117f4d77a1292
github.com/opencontainers/runtime-spec eba862dc2470385a233c7507392675cbeadf7353 # v1.0.1-45-geba862d
github.com/containerd/go-runc 5a6d9f37cfa36b15efba46dc7ea349fa9b7143c3
github.com/containerd/console c12b1e7919c14469339a5d38f2f8ed9b64a9de23
google.golang.org/genproto d80a6e20e776b0b17a324d0ba1ab50a39c8e8944
golang.org/x/text 19e51611da83d6be54ddafce4a4af510cb3e9ea4
github.com/docker/go-events 9461782956ad83b30282bf90e31fa6a70c255ba9
github.com/syndtr/gocapability db04d3cc01c8b54962a58ec7e491717d06cfcc16
github.com/Microsoft/hcsshim 44c060121b68e8bdc40b411beba551f3b4ee9e55
github.com/Microsoft/hcsshim v0.7.3
golang.org/x/crypto 0709b304e793a5edb4a2c0145f281ecdc20838a4
github.com/urfave/cli 7bc6a0acffa589f415f88aca16cc1de5ffd66f9c
github.com/morikuni/aec 39771216ff4c63d11f5e604076f9c45e8be1067b
@ -39,7 +40,7 @@ golang.org/x/time f51c12702a4d776e4c1fa9b0fabab841babae631
github.com/docker/docker 71cd53e4a197b303c6ba086bd584ffd67a884281
github.com/pkg/profile 5b67d428864e92711fcbd2f8629456121a56d91f
github.com/tonistiigi/fsutil b19464cd1b6a00773b4f2eb7acf9c30426f9df42
github.com/tonistiigi/fsutil f567071bed2416e4d87d260d3162722651182317
github.com/hashicorp/go-immutable-radix 826af9ccf0feeee615d546d69b11f8e98da8c8f1 git://github.com/tonistiigi/go-immutable-radix.git
github.com/hashicorp/golang-lru a0d98a5f288019575c6d1f4bb1573fef2d1fcdc4
github.com/mitchellh/hashstructure 2bca23e0e452137f789efbc8610126fd8b94f73b
@ -49,8 +50,9 @@ github.com/docker/distribution 30578ca32960a4d368bf6db67b0a33c2a1f3dc6f
github.com/tonistiigi/units 6950e57a87eaf136bbe44ef2ec8e75b9e3569de2
github.com/docker/cli 99576756eb3303b7af8102c502f21a912e3c1af6 https://github.com/tonistiigi/docker-cli.git
github.com/docker/docker-credential-helpers d68f9aeca33f5fd3f08eeae5e9d175edf4e731d1
github.com/docker/libnetwork 822e5b59d346b7ad0735df2c8e445e9787320e67
github.com/docker/libnetwork 36d3bed0e9f4b3c8c66df9bd45278bb90b33e911
github.com/BurntSushi/toml 3012a1dbe2e4bd1391d42b32f0577cb7bbc7f005
github.com/ishidawataru/sctp 07191f837fedd2f13d1ec7b5f885f0f3ec54b1cb
github.com/grpc-ecosystem/grpc-opentracing 8e809c8a86450a29b90dcc9efbf062d0fe6d9746
github.com/opentracing/opentracing-go 1361b9cd60be79c4c3a7fa9841b3c132e40066a7

View File

@ -4,6 +4,8 @@ import (
"context"
"hash"
"os"
"github.com/tonistiigi/fsutil/types"
)
type walkerFn func(ctx context.Context, pathC chan<- *currentPath) error
@ -14,7 +16,7 @@ func Changes(ctx context.Context, a, b walkerFn, changeFn ChangeFunc) error {
type HandleChangeFn func(ChangeKind, string, os.FileInfo, error) error
type ContentHasher func(*Stat) (hash.Hash, error)
type ContentHasher func(*types.Stat) (hash.Hash, error)
func GetWalkerFn(root string) walkerFn {
return func(ctx context.Context, pathC chan<- *currentPath) error {

View File

@ -5,6 +5,7 @@ import (
"os"
"strings"
"github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
)
@ -170,11 +171,11 @@ func sameFile(f1, f2 *currentPath) (same bool, retErr error) {
}
}
ls1, ok := f1.f.Sys().(*Stat)
ls1, ok := f1.f.Sys().(*types.Stat)
if !ok {
return false, nil
}
ls2, ok := f2.f.Sys().(*Stat)
ls2, ok := f2.f.Sys().(*types.Stat)
if !ok {
return false, nil
}
@ -185,7 +186,7 @@ func sameFile(f1, f2 *currentPath) (same bool, retErr error) {
// compareStat returns whether the stats are equivalent,
// whether the files are considered the same file, and
// an error
func compareStat(ls1, ls2 *Stat) (bool, error) {
func compareStat(ls1, ls2 *types.Stat) (bool, error) {
return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Devmajor == ls2.Devmajor && ls1.Devminor == ls2.Devminor && ls1.Linkname == ls2.Linkname, nil
}

View File

@ -12,6 +12,7 @@ import (
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
)
@ -25,7 +26,7 @@ type DiskWriterOpt struct {
Filter FilterFunc
}
type FilterFunc func(*Stat) bool
type FilterFunc func(*types.Stat) bool
type DiskWriter struct {
opt DiskWriterOpt
@ -95,7 +96,7 @@ func (dw *DiskWriter) HandleChange(kind ChangeKind, p string, fi os.FileInfo, er
return nil
}
stat, ok := fi.Sys().(*Stat)
stat, ok := fi.Sys().(*types.Stat)
if !ok {
return errors.Errorf("%s invalid change without stat information", p)
}
@ -246,7 +247,7 @@ type hashedWriter struct {
}
func newHashWriter(ch ContentHasher, fi os.FileInfo, w io.WriteCloser) (*hashedWriter, error) {
stat, ok := fi.Sys().(*Stat)
stat, ok := fi.Sys().(*types.Stat)
if !ok {
return nil, errors.Errorf("invalid change without stat information")
}
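
A small sketch, not from this diff, of the FilterFunc signature after the move from the package-local Stat to fsutil/types.Stat. Returning true is assumed to keep the entry in the sync; squashing ownership to root is only an example of a typical filter.

// Sketch only: the keep-on-true semantics of FilterFunc are an assumption.
package example

import "github.com/tonistiigi/fsutil/types"

// squashOwnership rewrites every entry to be root-owned and keeps it.
func squashOwnership(st *types.Stat) bool {
	st.Uid = 0
	st.Gid = 0
	return true
}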

View File

@ -8,9 +8,10 @@ import (
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil/types"
)
func rewriteMetadata(p string, stat *Stat) error {
func rewriteMetadata(p string, stat *types.Stat) error {
for key, value := range stat.Xattrs {
sysx.Setxattr(p, key, value, 0)
}
@ -34,7 +35,7 @@ func rewriteMetadata(p string, stat *Stat) error {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(path string, stat *Stat) error {
func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error {
mode := uint32(stat.Mode & 07777)
if os.FileMode(stat.Mode)&os.ModeCharDevice != 0 {
mode |= syscall.S_IFCHR

View File

@ -4,14 +4,15 @@ package fsutil
import (
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil/types"
)
func rewriteMetadata(p string, stat *Stat) error {
func rewriteMetadata(p string, stat *types.Stat) error {
return chtimes(p, stat.ModTime)
}
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(path string, stat *Stat) error {
func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error {
return errors.New("Not implemented on windows")
}

View File

@ -9,6 +9,7 @@ import (
"strings"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil/types"
)
type FS interface {
@ -36,13 +37,13 @@ func (fs *fs) Open(p string) (io.ReadCloser, error) {
return os.Open(filepath.Join(fs.root, p))
}
func SubDirFS(fs FS, stat Stat) FS {
func SubDirFS(fs FS, stat types.Stat) FS {
return &subDirFS{fs: fs, stat: stat}
}
type subDirFS struct {
fs FS
stat Stat
stat types.Stat
}
func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error {
@ -57,7 +58,7 @@ func (fs *subDirFS) Walk(ctx context.Context, fn filepath.WalkFunc) error {
return err
}
return fs.fs.Walk(ctx, func(p string, fi os.FileInfo, err error) error {
stat, ok := fi.Sys().(*Stat)
stat, ok := fi.Sys().(*types.Stat)
if !ok {
return errors.Wrapf(err, "invalid fileinfo without stat info: %s", p)
}

View File

@ -4,6 +4,7 @@ import (
"os"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil/types"
)
// Hardlinks validates that all targets for links were part of the changes
@ -25,7 +26,7 @@ func (v *Hardlinks) HandleChange(kind ChangeKind, p string, fi os.FileInfo, err
return nil
}
stat, ok := fi.Sys().(*Stat)
stat, ok := fi.Sys().(*types.Stat)
if !ok {
return errors.Errorf("invalid change without stat info: %s", p)
}

View File

@ -7,6 +7,7 @@ import (
"sync"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
)
@ -119,7 +120,7 @@ func (r *receiver) run(ctx context.Context) error {
g.Go(func() (retErr error) {
defer func() {
if retErr != nil {
r.conn.SendMsg(&Packet{Type: PACKET_ERR, Data: []byte(retErr.Error())})
r.conn.SendMsg(&types.Packet{Type: types.PACKET_ERR, Data: []byte(retErr.Error())})
}
}()
destWalker := emptyWalker
@ -133,7 +134,7 @@ func (r *receiver) run(ctx context.Context) error {
if err := dw.Wait(ctx); err != nil {
return err
}
- r.conn.SendMsg(&Packet{Type: PACKET_FIN})
+ r.conn.SendMsg(&types.Packet{Type: types.PACKET_FIN})
return nil
})
@@ -146,9 +147,9 @@ func (r *receiver) run(ctx context.Context) error {
r.progressCb(size, true)
}()
}
- var p Packet
+ var p types.Packet
for {
- p = Packet{Data: p.Data[:0]}
+ p = types.Packet{Data: p.Data[:0]}
if err := r.conn.RecvMsg(&p); err != nil {
return err
}
@@ -158,9 +159,9 @@ func (r *receiver) run(ctx context.Context) error {
}
switch p.Type {
- case PACKET_ERR:
+ case types.PACKET_ERR:
return errors.Errorf("error from sender: %s", p.Data)
- case PACKET_STAT:
+ case types.PACKET_STAT:
if p.Stat == nil {
if err := w.update(nil); err != nil {
return err
@@ -183,12 +184,12 @@ func (r *receiver) run(ctx context.Context) error {
if err := w.update(cp); err != nil {
return err
}
- case PACKET_DATA:
+ case types.PACKET_DATA:
r.muPipes.Lock()
pw, ok := r.pipes[p.ID]
r.muPipes.Unlock()
if !ok {
return errors.Errorf("invalid file request %s", p.ID)
return errors.Errorf("invalid file request %d", p.ID)
}
if len(p.Data) == 0 {
if err := pw.Close(); err != nil {
@ -199,9 +200,9 @@ func (r *receiver) run(ctx context.Context) error {
return err
}
}
- case PACKET_FIN:
+ case types.PACKET_FIN:
for {
- var p Packet
+ var p types.Packet
if err := r.conn.RecvMsg(&p); err != nil {
if err == io.EOF {
return nil
@@ -229,7 +230,7 @@ func (r *receiver) asyncDataFunc(ctx context.Context, p string, wc io.WriteClose
r.muPipes.Lock()
r.pipes[id] = wwc
r.muPipes.Unlock()
- if err := r.conn.SendMsg(&Packet{Type: PACKET_REQ, ID: id}); err != nil {
+ if err := r.conn.SendMsg(&types.Packet{Type: types.PACKET_REQ, ID: id}); err != nil {
return err
}
err := wwc.Wait(ctx)


@@ -7,6 +7,7 @@ import (
"sync"
"github.com/pkg/errors"
+ "github.com/tonistiigi/fsutil/types"
"golang.org/x/sync/errgroup"
)
@@ -56,7 +57,7 @@ func (s *sender) run(ctx context.Context) error {
g.Go(func() error {
err := s.walk(ctx)
if err != nil {
- s.conn.SendMsg(&Packet{Type: PACKET_ERR, Data: []byte(err.Error())})
+ s.conn.SendMsg(&types.Packet{Type: types.PACKET_ERR, Data: []byte(err.Error())})
}
return err
})
@@ -86,19 +87,19 @@ func (s *sender) run(ctx context.Context) error {
return ctx.Err()
default:
}
- var p Packet
+ var p types.Packet
if err := s.conn.RecvMsg(&p); err != nil {
return err
}
switch p.Type {
- case PACKET_ERR:
+ case types.PACKET_ERR:
return errors.Errorf("error from receiver: %s", p.Data)
- case PACKET_REQ:
+ case types.PACKET_REQ:
if err := s.queue(p.ID); err != nil {
return err
}
- case PACKET_FIN:
- return s.conn.SendMsg(&Packet{Type: PACKET_FIN})
+ case types.PACKET_FIN:
+ return s.conn.SendMsg(&types.Packet{Type: types.PACKET_FIN})
}
}
})
@@ -136,7 +137,7 @@ func (s *sender) sendFile(h *sendHandle) error {
return err
}
}
- return s.conn.SendMsg(&Packet{ID: h.id, Type: PACKET_DATA})
+ return s.conn.SendMsg(&types.Packet{ID: h.id, Type: types.PACKET_DATA})
}
func (s *sender) walk(ctx context.Context) error {
@@ -145,13 +146,13 @@ func (s *sender) walk(ctx context.Context) error {
if err != nil {
return err
}
- stat, ok := fi.Sys().(*Stat)
+ stat, ok := fi.Sys().(*types.Stat)
if !ok {
return errors.Wrapf(err, "invalid fileinfo without stat info: %s", path)
}
- p := &Packet{
- Type: PACKET_STAT,
+ p := &types.Packet{
+ Type: types.PACKET_STAT,
Stat: stat,
}
if fileCanRequestData(os.FileMode(stat.Mode)) {
@@ -166,7 +167,7 @@ func (s *sender) walk(ctx context.Context) error {
if err != nil {
return err
}
- return errors.Wrapf(s.conn.SendMsg(&Packet{Type: PACKET_STAT}), "failed to send last stat")
+ return errors.Wrapf(s.conn.SendMsg(&types.Packet{Type: types.PACKET_STAT}), "failed to send last stat")
}
func fileCanRequestData(m os.FileMode) bool {
@@ -184,7 +185,7 @@ func (fs *fileSender) Write(dt []byte) (int, error) {
if len(dt) == 0 {
return 0, nil
}
- p := &Packet{Type: PACKET_DATA, ID: fs.id, Data: dt}
+ p := &types.Packet{Type: types.PACKET_DATA, ID: fs.id, Data: dt}
if err := fs.sender.conn.SendMsg(p); err != nil {
return 0, err
}
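Taken together, the receive and send hunks describe a small request/response protocol over types.Packet: the sender streams one PACKET_STAT per walked file (and an empty stat as terminator), the receiver asks for file contents with PACKET_REQ, bytes travel back as PACKET_DATA chunks with an empty data packet marking end of file, PACKET_FIN closes the session and PACKET_ERR aborts it. A condensed, illustrative responder loop under those assumptions (Conn is a stand-in for the real connection type, which is not shown in this diff):

package fsutilexample

import (
    "github.com/pkg/errors"
    "github.com/tonistiigi/fsutil/types"
)

// Conn is a stand-in for fsutil's connection abstraction; only the two
// methods used in the hunks above are modelled here.
type Conn interface {
    SendMsg(m interface{}) error
    RecvMsg(m interface{}) error
}

// serveRequests is an illustrative condensation of the sender loop above:
// it answers every PACKET_REQ with the same payload followed by an empty
// DATA packet (the end-of-file marker), mirrors PACKET_FIN, and aborts on
// PACKET_ERR.
func serveRequests(conn Conn, content []byte) error {
    for {
        var p types.Packet
        if err := conn.RecvMsg(&p); err != nil {
            return err
        }
        switch p.Type {
        case types.PACKET_REQ:
            if err := conn.SendMsg(&types.Packet{ID: p.ID, Type: types.PACKET_DATA, Data: content}); err != nil {
                return err
            }
            if err := conn.SendMsg(&types.Packet{ID: p.ID, Type: types.PACKET_DATA}); err != nil {
                return err
            }
        case types.PACKET_FIN:
            return conn.SendMsg(&types.Packet{Type: types.PACKET_FIN})
        case types.PACKET_ERR:
            return errors.Errorf("error from receiver: %s", p.Data)
        }
    }
}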

vendor/github.com/tonistiigi/fsutil/stat.go (new file, generated, vendored, 61 lines added)

@@ -0,0 +1,61 @@
package fsutil
import (
"os"
"path/filepath"
"runtime"
"github.com/pkg/errors"
"github.com/tonistiigi/fsutil/types"
)
// constructs a Stat object. path is where the path can be found right
// now, relpath is the desired path to be recorded in the stat (so
// relative to whatever base dir is relevant). fi is the os.Stat
// info. inodemap is used to calculate hardlinks over a series of
// mkstat calls and maps inode to the canonical (aka "first") path for
// a set of hardlinks to that inode.
func mkstat(path, relpath string, fi os.FileInfo, inodemap map[uint64]string) (*types.Stat, error) {
relpath = filepath.ToSlash(relpath)
stat := &types.Stat{
Path: relpath,
Mode: uint32(fi.Mode()),
ModTime: fi.ModTime().UnixNano(),
}
setUnixOpt(fi, stat, relpath, inodemap)
if !fi.IsDir() {
stat.Size_ = fi.Size()
if fi.Mode()&os.ModeSymlink != 0 {
link, err := os.Readlink(path)
if err != nil {
return nil, errors.Wrapf(err, "failed to readlink %s", path)
}
stat.Linkname = link
}
}
if err := loadXattr(path, stat); err != nil {
return nil, errors.Wrapf(err, "failed to xattr %s", relpath)
}
if runtime.GOOS == "windows" {
permPart := stat.Mode & uint32(os.ModePerm)
noPermPart := stat.Mode &^ uint32(os.ModePerm)
// Add the x bit: make everything +x from windows
permPart |= 0111
permPart &= 0755
stat.Mode = noPermPart | permPart
}
return stat, nil
}
func Stat(path string) (*types.Stat, error) {
fi, err := os.Lstat(path)
if err != nil {
return nil, errors.Wrap(err, "os stat")
}
return mkstat(path, filepath.Base(path), fi, nil)
}
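The new stat.go gives the package an exported single-path entry point: fsutil.Stat runs os.Lstat, converts the result via mkstat with a nil inodemap (so no hardlink tracking), and returns a *types.Stat; on Windows the mode is normalised to carry the +x bits. A small usage sketch:

package main

import (
    "fmt"
    "log"

    "github.com/tonistiigi/fsutil"
)

func main() {
    st, err := fsutil.Stat("/etc/hosts")
    if err != nil {
        log.Fatal(err)
    }
    // Path is the base name, Mode the os.FileMode bits, ModTime Unix nanoseconds.
    fmt.Println(st.Path, st.Mode, st.ModTime, st.Size_, st.Linkname)
}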


@@ -8,11 +8,15 @@ import (
"github.com/containerd/continuity/sysx"
"github.com/pkg/errors"
+ "github.com/tonistiigi/fsutil/types"
)
- func loadXattr(origpath string, stat *Stat) error {
+ func loadXattr(origpath string, stat *types.Stat) error {
xattrs, err := sysx.LListxattr(origpath)
if err != nil {
+ if errors.Cause(err) == syscall.ENOTSUP {
+ return nil
+ }
return errors.Wrapf(err, "failed to xattr %s", origpath)
}
if len(xattrs) > 0 {
@@ -28,7 +32,7 @@ func loadXattr(origpath string, stat *Stat) error {
return nil
}
- func setUnixOpt(fi os.FileInfo, stat *Stat, path string, seenFiles map[uint64]string) {
+ func setUnixOpt(fi os.FileInfo, stat *types.Stat, path string, seenFiles map[uint64]string) {
s := fi.Sys().(*syscall.Stat_t)
stat.Uid = s.Uid
@@ -42,13 +46,15 @@ func setUnixOpt(fi os.FileInfo, stat *Stat, path string, seenFiles map[uint64]st
}
ino := s.Ino
- if s.Nlink > 1 {
- if oldpath, ok := seenFiles[ino]; ok {
- stat.Linkname = oldpath
- stat.Size_ = 0
+ if seenFiles != nil {
+ if s.Nlink > 1 {
+ if oldpath, ok := seenFiles[ino]; ok {
+ stat.Linkname = oldpath
+ stat.Size_ = 0
}
}
+ seenFiles[ino] = path
}
- seenFiles[ino] = path
}
}
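The hunk above makes setUnixOpt tolerate a nil map: fsutil.Stat calls mkstat with a nil inodemap, and writing to a nil map panics in Go, so the hardlink bookkeeping now runs only when a map was supplied. The same guard in isolation (recordHardlink is an illustrative helper, not fsutil code):

package fsutilexample

// recordHardlink shows the guard added above: when no inode map is given
// (as in the single-file Stat path), hardlink bookkeeping is skipped entirely,
// which avoids a panic from writing into a nil map.
func recordHardlink(seen map[uint64]string, ino, nlink uint64, path string) (linkname string) {
    if seen == nil {
        return ""
    }
    if nlink > 1 {
        if oldpath, ok := seen[ino]; ok {
            linkname = oldpath
        }
    }
    seen[ino] = path
    return linkname
}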

vendor/github.com/tonistiigi/fsutil/stat_windows.go (new file, generated, vendored, 16 lines added)

@@ -0,0 +1,16 @@
// +build windows
package fsutil
import (
"os"
"github.com/tonistiigi/fsutil/types"
)
func loadXattr(_ string, _ *types.Stat) error {
return nil
}
func setUnixOpt(_ os.FileInfo, _ *types.Stat, _ string, _ map[uint64]string) {
}

Some files were not shown because too many files have changed in this diff.