From 5eea459bde4b852c58fc42e26cfbbca55314542d Mon Sep 17 00:00:00 2001 From: decentral1se Date: Wed, 1 Apr 2026 19:32:23 +0200 Subject: [PATCH] WIP: feat: use compose-go See https://git.coopcloud.tech/toolshed/abra/issues/492 --- cli/app/deploy.go | 3 +- cli/app/labels.go | 6 +- cli/app/move.go | 3 +- cli/app/ps.go | 18 +- cli/app/rollback.go | 2 +- cli/app/undeploy.go | 3 +- cli/app/upgrade.go | 2 +- cli/internal/recipe.go | 3 +- cli/internal/validate.go | 15 - cli/recipe/release.go | 3 +- cli/recipe/upgrade.go | 2 +- go.mod | 6 + go.sum | 13 + pkg/app/app.go | 23 +- pkg/app/compose.go | 14 +- pkg/app/compose_test.go | 12 +- pkg/autocomplete/autocomplete.go | 2 +- pkg/deploy/utils.go | 6 +- pkg/lint/recipe.go | 39 +- pkg/recipe/compose.go | 46 +- pkg/recipe/git.go | 2 +- pkg/secret/secret.go | 6 +- pkg/test/test.go | 13 +- pkg/upstream/convert/compose.go | 28 +- pkg/upstream/convert/compose_test.go | 170 - pkg/upstream/convert/service.go | 169 +- pkg/upstream/convert/service_test.go | 678 ---- pkg/upstream/convert/volume.go | 22 +- pkg/upstream/convert/volume_test.go | 361 -- pkg/upstream/stack/loader.go | 73 +- pkg/upstream/stack/loader_test.go | 26 + pkg/upstream/stack/stack.go | 8 +- .../test_recipe/compose.interpolate.yml | 7 + tests/resources/test_recipe/compose.yml | 1 - .../compose-spec/compose-go/v2/LICENSE | 191 ++ .../compose-spec/compose-go/v2/NOTICE | 2 + .../compose-spec/compose-go/v2/cli/options.go | 590 ++++ .../compose-go/v2/consts/consts.go | 29 + .../compose-spec/compose-go/v2/dotenv/LICENSE | 22 + .../compose-spec/compose-go/v2/dotenv/env.go | 73 + .../compose-go/v2/dotenv/format.go | 51 + .../compose-go/v2/dotenv/godotenv.go | 182 + .../compose-go/v2/dotenv/parser.go | 286 ++ .../compose-go/v2/errdefs/errors.go | 56 + .../compose-go/v2/format/volume.go | 199 ++ .../compose-spec/compose-go/v2/graph/cycle.go | 63 + .../compose-spec/compose-go/v2/graph/graph.go | 75 + .../compose-go/v2/graph/services.go | 80 + .../compose-go/v2/graph/traversal.go 
| 211 ++ .../v2/interpolation/interpolation.go | 137 + .../compose-go/v2/loader/environment.go | 110 + .../compose-go/v2/loader/example1.env | 10 + .../compose-go/v2/loader/example1.label | 10 + .../compose-go/v2/loader/example2.env | 4 + .../compose-go/v2/loader/example2.label | 4 + .../compose-go/v2/loader/extends.go | 221 ++ .../compose-spec/compose-go/v2/loader/fix.go | 36 + .../compose-go/v2/loader/full-example.yml | 461 +++ .../compose-go/v2/loader/include.go | 223 ++ .../compose-go/v2/loader/interpolate.go | 118 + .../compose-go/v2/loader/loader.go | 899 +++++ .../compose-go/v2/loader/mapstructure.go | 79 + .../compose-go/v2/loader/normalize.go | 266 ++ .../compose-go/v2/loader/omitEmpty.go | 75 + .../compose-go/v2/loader/paths.go | 50 + .../compose-go/v2/loader/reset.go | 196 ++ .../compose-go/v2/loader/validate.go | 218 ++ .../compose-go/v2/override/extends.go | 27 + .../compose-go/v2/override/merge.go | 307 ++ .../compose-go/v2/override/uncity.go | 229 ++ .../compose-go/v2/paths/context.go | 51 + .../compose-go/v2/paths/extends.go | 25 + .../compose-spec/compose-go/v2/paths/home.go | 37 + .../compose-go/v2/paths/resolve.go | 169 + .../compose-spec/compose-go/v2/paths/unix.go | 57 + .../compose-go/v2/paths/windows_path.go | 233 ++ .../compose-go/v2/schema/compose-spec.json | 1912 +++++++++++ .../compose-go/v2/schema/schema.go | 149 + .../compose-go/v2/schema/using-variables.yaml | 123 + .../compose-go/v2/template/template.go | 380 +++ .../compose-go/v2/template/variables.go | 157 + .../compose-go/v2/transform/build.go | 48 + .../compose-go/v2/transform/canonical.go | 137 + .../compose-go/v2/transform/defaults.go | 97 + .../compose-go/v2/transform/dependson.go | 53 + .../compose-go/v2/transform/device.go | 60 + .../compose-go/v2/transform/devices.go | 36 + .../compose-go/v2/transform/envfile.go | 55 + .../compose-go/v2/transform/extends.go | 36 + .../compose-go/v2/transform/external.go | 54 + .../compose-go/v2/transform/gpus.go | 38 + 
.../compose-go/v2/transform/include.go | 36 + .../compose-go/v2/transform/mapping.go | 46 + .../compose-go/v2/transform/ports.go | 104 + .../compose-go/v2/transform/secrets.go | 49 + .../compose-go/v2/transform/services.go | 41 + .../compose-go/v2/transform/ssh.go | 51 + .../compose-go/v2/transform/ulimits.go | 34 + .../compose-go/v2/transform/volume.go | 63 + .../compose-spec/compose-go/v2/tree/path.go | 87 + .../compose-spec/compose-go/v2/types/build.go | 48 + .../compose-spec/compose-go/v2/types/bytes.go | 48 + .../compose-go/v2/types/command.go | 86 + .../compose-go/v2/types/config.go | 145 + .../compose-spec/compose-go/v2/types/cpus.go | 48 + .../compose-go/v2/types/derived.gen.go | 2423 +++++++++++++ .../compose-go/v2/types/develop.go | 44 + .../compose-go/v2/types/device.go | 53 + .../compose-go/v2/types/duration.go | 62 + .../compose-go/v2/types/envfile.go | 23 + .../compose-go/v2/types/healthcheck.go | 53 + .../compose-spec/compose-go/v2/types/hooks.go | 28 + .../compose-go/v2/types/hostList.go | 144 + .../compose-go/v2/types/labels.go | 95 + .../compose-go/v2/types/mapping.go | 230 ++ .../compose-go/v2/types/models.go | 31 + .../compose-go/v2/types/options.go | 66 + .../compose-go/v2/types/project.go | 844 +++++ .../compose-go/v2/types/services.go | 45 + .../compose-spec/compose-go/v2/types/ssh.go | 73 + .../compose-go/v2/types/stringOrList.go | 61 + .../compose-spec/compose-go/v2/types/types.go | 878 +++++ .../compose-go/v2/utils/collectionutils.go | 66 + .../compose-go/v2/utils/pathutils.go | 91 + .../compose-spec/compose-go/v2/utils/set.go | 95 + .../compose-go/v2/utils/stringutils.go | 50 + .../compose-go/v2/validation/external.go | 49 + .../compose-go/v2/validation/validation.go | 119 + .../compose-go/v2/validation/volume.go | 39 + .../mattn/go-shellwords/.travis.yml | 16 + vendor/github.com/mattn/go-shellwords/LICENSE | 21 + .../github.com/mattn/go-shellwords/README.md | 55 + .../github.com/mattn/go-shellwords/go.test.sh | 12 + 
.../mattn/go-shellwords/shellwords.go | 317 ++ .../mattn/go-shellwords/util_posix.go | 29 + .../mattn/go-shellwords/util_windows.go | 29 + .../santhosh-tekuri/jsonschema/v6/.gitmodules | 4 + .../jsonschema/v6/.golangci.yml | 5 + .../jsonschema/v6/.pre-commit-hooks.yaml | 7 + .../santhosh-tekuri/jsonschema/v6/LICENSE | 175 + .../santhosh-tekuri/jsonschema/v6/README.md | 86 + .../santhosh-tekuri/jsonschema/v6/compiler.go | 332 ++ .../santhosh-tekuri/jsonschema/v6/content.go | 51 + .../santhosh-tekuri/jsonschema/v6/draft.go | 360 ++ .../santhosh-tekuri/jsonschema/v6/format.go | 708 ++++ .../santhosh-tekuri/jsonschema/v6/go.work | 8 + .../jsonschema/v6/kind/kind.go | 651 ++++ .../santhosh-tekuri/jsonschema/v6/loader.go | 266 ++ .../jsonschema/v6/metaschemas/draft-04/schema | 151 + .../jsonschema/v6/metaschemas/draft-06/schema | 150 + .../jsonschema/v6/metaschemas/draft-07/schema | 172 + .../metaschemas/draft/2019-09/meta/applicator | 55 + .../v6/metaschemas/draft/2019-09/meta/content | 15 + .../v6/metaschemas/draft/2019-09/meta/core | 56 + .../v6/metaschemas/draft/2019-09/meta/format | 13 + .../metaschemas/draft/2019-09/meta/meta-data | 35 + .../metaschemas/draft/2019-09/meta/validation | 97 + .../v6/metaschemas/draft/2019-09/schema | 41 + .../metaschemas/draft/2020-12/meta/applicator | 47 + .../v6/metaschemas/draft/2020-12/meta/content | 15 + .../v6/metaschemas/draft/2020-12/meta/core | 50 + .../draft/2020-12/meta/format-annotation | 13 + .../draft/2020-12/meta/format-assertion | 13 + .../metaschemas/draft/2020-12/meta/meta-data | 35 + .../draft/2020-12/meta/unevaluated | 14 + .../metaschemas/draft/2020-12/meta/validation | 97 + .../v6/metaschemas/draft/2020-12/schema | 57 + .../jsonschema/v6/objcompiler.go | 549 +++ .../santhosh-tekuri/jsonschema/v6/output.go | 212 ++ .../santhosh-tekuri/jsonschema/v6/position.go | 142 + .../santhosh-tekuri/jsonschema/v6/root.go | 202 ++ .../santhosh-tekuri/jsonschema/v6/roots.go | 289 ++ .../santhosh-tekuri/jsonschema/v6/schema.go | 
248 ++ .../santhosh-tekuri/jsonschema/v6/util.go | 464 +++ .../jsonschema/v6/validator.go | 975 ++++++ .../santhosh-tekuri/jsonschema/v6/vocab.go | 106 + .../xhit/go-str2duration/v2/LICENSE | 27 + .../xhit/go-str2duration/v2/README.md | 88 + .../xhit/go-str2duration/v2/str2duration.go | 331 ++ vendor/go.yaml.in/yaml/v4/.gitignore | 3 + vendor/go.yaml.in/yaml/v4/.golangci.yaml | 46 + vendor/go.yaml.in/yaml/v4/.ls-lint.yaml | 16 + vendor/go.yaml.in/yaml/v4/.typos.toml | 21 + vendor/go.yaml.in/yaml/v4/.yamllint.yaml | 19 + vendor/go.yaml.in/yaml/v4/CONTRIBUTING.md | 160 + vendor/go.yaml.in/yaml/v4/GNUmakefile | 136 + vendor/go.yaml.in/yaml/v4/LICENSE | 202 ++ vendor/go.yaml.in/yaml/v4/NOTICE | 21 + vendor/go.yaml.in/yaml/v4/README.md | 227 ++ vendor/go.yaml.in/yaml/v4/decode.go | 1042 ++++++ vendor/go.yaml.in/yaml/v4/encode.go | 592 ++++ .../yaml/v4/internal/libyaml/api.go | 737 ++++ .../yaml/v4/internal/libyaml/doc.go | 5 + .../yaml/v4/internal/libyaml/emitter.go | 2064 +++++++++++ .../yaml/v4/internal/libyaml/parser.go | 1267 +++++++ .../yaml/v4/internal/libyaml/reader.go | 436 +++ .../yaml/v4/internal/libyaml/scanner.go | 3030 +++++++++++++++++ .../yaml/v4/internal/libyaml/writer.go | 43 + .../yaml/v4/internal/libyaml/yaml.go | 804 +++++ .../yaml/v4/internal/libyaml/yamlprivate.go | 241 ++ vendor/go.yaml.in/yaml/v4/resolve.go | 286 ++ vendor/go.yaml.in/yaml/v4/sorter.go | 134 + vendor/go.yaml.in/yaml/v4/yaml.go | 904 +++++ vendor/golang.org/x/sync/LICENSE | 27 + vendor/golang.org/x/sync/PATENTS | 22 + vendor/golang.org/x/sync/errgroup/errgroup.go | 151 + .../x/text/feature/plural/common.go | 70 + .../x/text/feature/plural/message.go | 244 ++ .../x/text/feature/plural/plural.go | 262 ++ .../x/text/feature/plural/tables.go | 552 +++ .../x/text/internal/catmsg/catmsg.go | 417 +++ .../x/text/internal/catmsg/codec.go | 407 +++ .../x/text/internal/catmsg/varint.go | 62 + .../x/text/internal/format/format.go | 41 + .../x/text/internal/format/parser.go | 358 ++ 
.../x/text/internal/number/common.go | 55 + .../x/text/internal/number/decimal.go | 500 +++ .../x/text/internal/number/format.go | 533 +++ .../x/text/internal/number/number.go | 152 + .../x/text/internal/number/pattern.go | 485 +++ .../internal/number/roundingmode_string.go | 30 + .../x/text/internal/number/tables.go | 1219 +++++++ .../x/text/internal/stringset/set.go | 86 + vendor/golang.org/x/text/message/catalog.go | 36 + .../x/text/message/catalog/catalog.go | 365 ++ .../golang.org/x/text/message/catalog/dict.go | 133 + vendor/golang.org/x/text/message/doc.go | 99 + vendor/golang.org/x/text/message/format.go | 510 +++ vendor/golang.org/x/text/message/message.go | 192 ++ vendor/golang.org/x/text/message/print.go | 984 ++++++ vendor/modules.txt | 43 + 231 files changed, 44914 insertions(+), 1478 deletions(-) delete mode 100644 pkg/upstream/convert/compose_test.go delete mode 100644 pkg/upstream/convert/service_test.go delete mode 100644 pkg/upstream/convert/volume_test.go create mode 100644 pkg/upstream/stack/loader_test.go create mode 100644 tests/resources/test_recipe/compose.interpolate.yml create mode 100644 vendor/github.com/compose-spec/compose-go/v2/LICENSE create mode 100644 vendor/github.com/compose-spec/compose-go/v2/NOTICE create mode 100644 vendor/github.com/compose-spec/compose-go/v2/cli/options.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/consts/consts.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE create mode 100644 vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/dotenv/format.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/format/volume.go create mode 100644 
vendor/github.com/compose-spec/compose-go/v2/graph/cycle.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/graph/graph.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/graph/services.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/environment.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/example1.env create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/example1.label create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/example2.env create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/example2.label create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/extends.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/fix.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/include.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/loader.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/omitEmpty.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/paths.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/reset.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/loader/validate.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/override/extends.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/override/merge.go create mode 
100644 vendor/github.com/compose-spec/compose-go/v2/override/uncity.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/context.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/extends.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/home.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/unix.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/paths/windows_path.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json create mode 100644 vendor/github.com/compose-spec/compose-go/v2/schema/schema.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/schema/using-variables.yaml create mode 100644 vendor/github.com/compose-spec/compose-go/v2/template/template.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/template/variables.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/build.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/device.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/devices.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/extends.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/external.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/gpus.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/include.go create mode 100644 
vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/ports.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/services.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/transform/volume.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/tree/path.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/build.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/bytes.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/command.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/config.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/cpus.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/develop.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/device.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/duration.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/envfile.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/hooks.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/hostList.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/labels.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/mapping.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/models.go create mode 100644 
vendor/github.com/compose-spec/compose-go/v2/types/options.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/project.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/services.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/ssh.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/types/types.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/utils/pathutils.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/utils/set.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/validation/external.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/validation/validation.go create mode 100644 vendor/github.com/compose-spec/compose-go/v2/validation/volume.go create mode 100644 vendor/github.com/mattn/go-shellwords/.travis.yml create mode 100644 vendor/github.com/mattn/go-shellwords/LICENSE create mode 100644 vendor/github.com/mattn/go-shellwords/README.md create mode 100644 vendor/github.com/mattn/go-shellwords/go.test.sh create mode 100644 vendor/github.com/mattn/go-shellwords/shellwords.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_posix.go create mode 100644 vendor/github.com/mattn/go-shellwords/util_windows.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md create mode 100644 
vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation create mode 
100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go create mode 100644 vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go create mode 100644 vendor/github.com/xhit/go-str2duration/v2/LICENSE create mode 100644 vendor/github.com/xhit/go-str2duration/v2/README.md create mode 100644 vendor/github.com/xhit/go-str2duration/v2/str2duration.go create mode 100644 vendor/go.yaml.in/yaml/v4/.gitignore create mode 100644 vendor/go.yaml.in/yaml/v4/.golangci.yaml create mode 100644 vendor/go.yaml.in/yaml/v4/.ls-lint.yaml create mode 100644 vendor/go.yaml.in/yaml/v4/.typos.toml create mode 100644 vendor/go.yaml.in/yaml/v4/.yamllint.yaml create mode 100644 vendor/go.yaml.in/yaml/v4/CONTRIBUTING.md create mode 100644 vendor/go.yaml.in/yaml/v4/GNUmakefile create mode 100644 vendor/go.yaml.in/yaml/v4/LICENSE create mode 100644 vendor/go.yaml.in/yaml/v4/NOTICE create mode 100644 
vendor/go.yaml.in/yaml/v4/README.md create mode 100644 vendor/go.yaml.in/yaml/v4/decode.go create mode 100644 vendor/go.yaml.in/yaml/v4/encode.go create mode 100644 vendor/go.yaml.in/yaml/v4/internal/libyaml/api.go create mode 100644 vendor/go.yaml.in/yaml/v4/internal/libyaml/doc.go create mode 100644 vendor/go.yaml.in/yaml/v4/internal/libyaml/emitter.go create mode 100644 vendor/go.yaml.in/yaml/v4/internal/libyaml/parser.go create mode 100644 vendor/go.yaml.in/yaml/v4/internal/libyaml/reader.go create mode 100644 vendor/go.yaml.in/yaml/v4/internal/libyaml/scanner.go create mode 100644 vendor/go.yaml.in/yaml/v4/internal/libyaml/writer.go create mode 100644 vendor/go.yaml.in/yaml/v4/internal/libyaml/yaml.go create mode 100644 vendor/go.yaml.in/yaml/v4/internal/libyaml/yamlprivate.go create mode 100644 vendor/go.yaml.in/yaml/v4/resolve.go create mode 100644 vendor/go.yaml.in/yaml/v4/sorter.go create mode 100644 vendor/go.yaml.in/yaml/v4/yaml.go create mode 100644 vendor/golang.org/x/sync/LICENSE create mode 100644 vendor/golang.org/x/sync/PATENTS create mode 100644 vendor/golang.org/x/sync/errgroup/errgroup.go create mode 100644 vendor/golang.org/x/text/feature/plural/common.go create mode 100644 vendor/golang.org/x/text/feature/plural/message.go create mode 100644 vendor/golang.org/x/text/feature/plural/plural.go create mode 100644 vendor/golang.org/x/text/feature/plural/tables.go create mode 100644 vendor/golang.org/x/text/internal/catmsg/catmsg.go create mode 100644 vendor/golang.org/x/text/internal/catmsg/codec.go create mode 100644 vendor/golang.org/x/text/internal/catmsg/varint.go create mode 100644 vendor/golang.org/x/text/internal/format/format.go create mode 100644 vendor/golang.org/x/text/internal/format/parser.go create mode 100644 vendor/golang.org/x/text/internal/number/common.go create mode 100644 vendor/golang.org/x/text/internal/number/decimal.go create mode 100644 vendor/golang.org/x/text/internal/number/format.go create mode 100644 
vendor/golang.org/x/text/internal/number/number.go create mode 100644 vendor/golang.org/x/text/internal/number/pattern.go create mode 100644 vendor/golang.org/x/text/internal/number/roundingmode_string.go create mode 100644 vendor/golang.org/x/text/internal/number/tables.go create mode 100644 vendor/golang.org/x/text/internal/stringset/set.go create mode 100644 vendor/golang.org/x/text/message/catalog.go create mode 100644 vendor/golang.org/x/text/message/catalog/catalog.go create mode 100644 vendor/golang.org/x/text/message/catalog/dict.go create mode 100644 vendor/golang.org/x/text/message/doc.go create mode 100644 vendor/golang.org/x/text/message/format.go create mode 100644 vendor/golang.org/x/text/message/message.go create mode 100644 vendor/golang.org/x/text/message/print.go diff --git a/cli/app/deploy.go b/cli/app/deploy.go index 6fbf7cfa..cc552ff1 100644 --- a/cli/app/deploy.go +++ b/cli/app/deploy.go @@ -157,7 +157,8 @@ checkout as-is. Recipe commit hashes are also supported as values for ResolveImage: stack.ResolveImageAlways, Detach: false, } - compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env) + + compose, err := appPkg.GetAppComposeConfig(composeFiles, app.Env) if err != nil { log.Fatal(err) } diff --git a/cli/app/labels.go b/cli/app/labels.go index 08f47ebe..5a9ccde8 100644 --- a/cli/app/labels.go +++ b/cli/app/labels.go @@ -13,7 +13,7 @@ import ( "coopcloud.tech/abra/pkg/i18n" "coopcloud.tech/abra/pkg/log" "coopcloud.tech/abra/pkg/upstream/convert" - composetypes "github.com/docker/cli/cli/compose/types" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/filters" dockerClient "github.com/docker/docker/client" @@ -80,13 +80,13 @@ var AppLabelsCommand = &cobra.Command{ rows = append(rows, []string{i18n.G("RECIPE LABELS"), "---"}) - config, err := app.Recipe.GetComposeConfig(app.Env) + config, err := app.Recipe.GetComposeConfig() if err != nil { 
log.Fatal(err) } var localLabelKeys []string - var appServiceConfig composetypes.ServiceConfig + var appServiceConfig composeGoTypes.ServiceConfig for _, service := range config.Services { if service.Name == "app" { appServiceConfig = service diff --git a/cli/app/move.go b/cli/app/move.go index 60cafb65..d02e4247 100644 --- a/cli/app/move.go +++ b/cli/app/move.go @@ -262,8 +262,7 @@ func getAppResources(cl *dockerclient.Client, app app.App) (*AppResources, error return nil, err } - opts := stack.Deploy{Composefiles: composeFiles, Namespace: app.StackName()} - compose, err := appPkg.GetAppComposeConfig(app.Name, opts, app.Env) + compose, err := appPkg.GetAppComposeConfig(composeFiles, app.Env) if err != nil { return nil, err } diff --git a/cli/app/ps.go b/cli/app/ps.go index 87b27375..3e4248d1 100644 --- a/cli/app/ps.go +++ b/cli/app/ps.go @@ -4,7 +4,8 @@ import ( "context" "encoding/json" "fmt" - "sort" + "maps" + "slices" "strings" "coopcloud.tech/abra/cli/internal" @@ -87,26 +88,19 @@ func showPSOutput(app appPkg.App, cl *dockerClient.Client, deployedVersion, chao return } - deployOpts := stack.Deploy{ - Composefiles: composeFiles, - Namespace: app.StackName(), - Prune: false, - ResolveImage: stack.ResolveImageAlways, - } - compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env) + compose, err := appPkg.GetAppComposeConfig(composeFiles, app.Env) if err != nil { log.Fatal(err) return } services := compose.Services - sort.Slice(services, func(i, j int) bool { - return services[i].Name < services[j].Name - }) var rows [][]string allContainerStats := make(map[string]map[string]string) - for _, service := range services { + for _, serviceName := range slices.Sorted(maps.Keys(compose.Services)) { + service := services[serviceName] + filters := filters.NewArgs() filters.Add("name", fmt.Sprintf("^%s_%s", app.StackName(), service.Name)) diff --git a/cli/app/rollback.go b/cli/app/rollback.go index 9fd06a0b..6cc32c7e 100644 --- a/cli/app/rollback.go +++ 
b/cli/app/rollback.go @@ -173,7 +173,7 @@ beforehand. See "abra app backup" for more.`), Detach: false, } - compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env) + compose, err := appPkg.GetAppComposeConfig(composeFiles, app.Env) if err != nil { log.Fatal(err) } diff --git a/cli/app/undeploy.go b/cli/app/undeploy.go index 4c0ca446..180be1cd 100644 --- a/cli/app/undeploy.go +++ b/cli/app/undeploy.go @@ -89,8 +89,7 @@ Passing "--prune/-p" does not remove those volumes.`), log.Fatal(err) } - opts := stack.Deploy{Composefiles: composeFiles, Namespace: stackName} - compose, err := appPkg.GetAppComposeConfig(app.Name, opts, app.Env) + compose, err := appPkg.GetAppComposeConfig(composeFiles, app.Env) if err != nil { log.Fatal(err) } diff --git a/cli/app/upgrade.go b/cli/app/upgrade.go index d5e5513a..940c780e 100644 --- a/cli/app/upgrade.go +++ b/cli/app/upgrade.go @@ -185,7 +185,7 @@ beforehand. See "abra app backup" for more.`), Detach: false, } - compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env) + compose, err := appPkg.GetAppComposeConfig(composeFiles, app.Env) if err != nil { log.Fatal(err) } diff --git a/cli/internal/recipe.go b/cli/internal/recipe.go index 5cadadf6..669f1bf8 100644 --- a/cli/internal/recipe.go +++ b/cli/internal/recipe.go @@ -92,10 +92,11 @@ func SetBumpType(bumpType string) { func GetMainAppImage(recipe recipe.Recipe) (string, error) { var path string - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return "", err } + for _, service := range config.Services { if service.Name == "app" { img, err := reference.ParseNormalizedNamed(service.Image) diff --git a/cli/internal/validate.go b/cli/internal/validate.go index f39b9d34..2ae73bf0 100644 --- a/cli/internal/validate.go +++ b/cli/internal/validate.go @@ -70,21 +70,6 @@ func ValidateRecipe(args []string, cmdName string) recipe.Recipe { log.Fatal(err) } - _, err = chosenRecipe.GetComposeConfig(nil) - if 
err != nil { - if cmdName == i18n.G("generate") { - if strings.Contains(err.Error(), "missing a compose") { - log.Fatal(err) - } - log.Warn(err) - } else { - if strings.Contains(err.Error(), "template_driver is not allowed") { - log.Warn(i18n.G("ensure %s recipe compose.* files include \"version: '3.8'\"", recipeName)) - } - log.Fatal(i18n.G("unable to validate recipe: %s", err)) - } - } - log.Debug(i18n.G("validated %s as recipe argument", recipeName)) return chosenRecipe diff --git a/cli/recipe/release.go b/cli/recipe/release.go index 250f2b0b..aa7f2a1f 100644 --- a/cli/recipe/release.go +++ b/cli/recipe/release.go @@ -312,10 +312,11 @@ likely to change. func GetImageVersions(recipe recipePkg.Recipe) (map[string]string, error) { services := make(map[string]string) - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return nil, err } + missingTag := false for _, service := range config.Services { if service.Image == "" { diff --git a/cli/recipe/upgrade.go b/cli/recipe/upgrade.go index 9e458d0e..c21453e9 100644 --- a/cli/recipe/upgrade.go +++ b/cli/recipe/upgrade.go @@ -124,7 +124,7 @@ interface.`), log.Debug(i18n.G("did not find versions file for %s", recipe.Name)) } - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { log.Fatal(err) } diff --git a/go.mod b/go.mod index 7ad4ba44..824400c7 100644 --- a/go.mod +++ b/go.mod @@ -9,6 +9,7 @@ require ( github.com/charmbracelet/bubbletea v1.3.10 github.com/charmbracelet/lipgloss v1.1.0 github.com/charmbracelet/log v1.0.0 + github.com/compose-spec/compose-go/v2 v2.10.1 github.com/distribution/reference v0.6.0 github.com/docker/cli v28.4.0+incompatible github.com/docker/docker v28.5.2+incompatible @@ -80,6 +81,7 @@ require ( github.com/mattn/go-isatty v0.0.20 // indirect github.com/mattn/go-localereader v0.0.1 // indirect github.com/mattn/go-runewidth v0.0.21 // indirect + github.com/mattn/go-shellwords v1.0.12 // 
indirect github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d // indirect github.com/miekg/pkcs11 v1.1.1 // indirect github.com/mitchellh/colorstring v0.0.0-20190213212951-d06e56a500db // indirect @@ -103,12 +105,14 @@ require ( github.com/prometheus/procfs v0.20.1 // indirect github.com/rivo/uniseg v0.4.7 // indirect github.com/russross/blackfriday/v2 v2.1.0 // indirect + github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 // indirect github.com/sirupsen/logrus v1.9.4 // indirect github.com/skeema/knownhosts v1.3.2 // indirect github.com/spf13/pflag v1.0.10 // indirect github.com/xanzy/ssh-agent v0.3.3 // indirect github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect github.com/xeipuuv/gojsonschema v1.2.0 // indirect + github.com/xhit/go-str2duration/v2 v2.1.0 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect go.opentelemetry.io/auto/sdk v1.2.1 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.67.0 // indirect @@ -124,9 +128,11 @@ require ( go.opentelemetry.io/proto/otlp v1.10.0 // indirect go.yaml.in/yaml/v2 v2.4.4 // indirect go.yaml.in/yaml/v3 v3.0.4 // indirect + go.yaml.in/yaml/v4 v4.0.0-rc.3 // indirect golang.org/x/crypto v0.49.0 // indirect golang.org/x/exp v0.0.0-20260312153236-7ab1446f8b90 // indirect golang.org/x/net v0.52.0 // indirect + golang.org/x/sync v0.20.0 // indirect golang.org/x/text v0.35.0 // indirect golang.org/x/time v0.15.0 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20260401024825-9d38bb4040a9 // indirect diff --git a/go.sum b/go.sum index 118add0f..de231018 100644 --- a/go.sum +++ b/go.sum @@ -176,6 +176,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= 
github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= +github.com/compose-spec/compose-go/v2 v2.10.1 h1:mFbXobojGRFIVi1UknrvaDAZ+PkJfyjqkA1yseh+vAU= +github.com/compose-spec/compose-go/v2 v2.10.1/go.mod h1:Ohac1SzhO/4fXXrzWIztIVB6ckmKBv1Nt5Z5mGVESUg= github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE= github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU= github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU= @@ -318,6 +320,8 @@ github.com/dgrijalva/jwt-go v0.0.0-20170104182250-a601269ab70c/go.mod h1:E3ru+11 github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk= github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E= +github.com/dlclark/regexp2 v1.11.0 h1:G/nrcoOa7ZXlpoa/91N3X7mM3r8eIlMBBJZvsz/mxKI= +github.com/dlclark/regexp2 v1.11.0/go.mod h1:DHkYz0B9wPfa6wondMfaivmHpzrQ3v9q8cnmRbL6yW8= github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E= github.com/docker/cli v0.0.0-20191017083524-a8ff7f821017/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8= github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY= @@ -632,6 +636,7 @@ github.com/mattn/go-runewidth v0.0.21 h1:jJKAZiQH+2mIinzCJIaIG9Be1+0NR+5sz/lYEEj github.com/mattn/go-runewidth v0.0.21/go.mod h1:XBkDxAl56ILZc9knddidhrOlY5R/pDhgLpndooCuJAs= github.com/mattn/go-shellwords v1.0.3/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= github.com/mattn/go-shellwords v1.0.6/go.mod h1:3xCvwCdWdlDJUrvuMn7Wuy9eWs4pE8vqg+NOMyg4B2o= +github.com/mattn/go-shellwords v1.0.12 
h1:M2zGm7EW6UQJvDeQxo4T51eKPurbeFbe8WtebGE2xrk= github.com/mattn/go-shellwords v1.0.12/go.mod h1:EZzvwXDESEeg03EKmM+RmDnNOPKG4lLtQsUlTZDWQ8Y= github.com/mattn/go-sqlite3 v1.6.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= @@ -807,6 +812,8 @@ github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQD github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/safchain/ethtool v0.0.0-20190326074333-42ed695e3de8/go.mod h1:Z0q5wiBQGYcxhMZ6gUqHn6pYNLypFAvaL3UvgZLR0U4= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 h1:PKK9DyHxif4LZo+uQSgXNqs0jj5+xZwwfKHgph2lxBw= +github.com/santhosh-tekuri/jsonschema/v6 v6.0.1/go.mod h1:JXeL+ps8p7/KNMjDQk3TCwPpBy0wYklyWTfbkIzdIFU= github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0= github.com/schollz/progressbar/v3 v3.19.0 h1:Ea18xuIRQXLAUidVDox3AbwfUhD0/1IvohyTutOIFoc= github.com/schollz/progressbar/v3 v3.19.0/go.mod h1:IsO3lpbaGuzh8zIMzgY3+J8l4C8GjO0Y9S69eFvNsec= @@ -894,6 +901,8 @@ github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415/go.mod h1: github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs= github.com/xeipuuv/gojsonschema v1.2.0 h1:LhYJRs+L4fBtjZUfuSZIKGeVu0QRy8e5Xi7D17UxZ74= github.com/xeipuuv/gojsonschema v1.2.0/go.mod h1:anYRn/JVcOK2ZgGU+IjEV4nwlhoK5sQluxsYJ78Id3Y= +github.com/xhit/go-str2duration/v2 v2.1.0 h1:lxklc02Drh6ynqX+DdPyp5pCKLUQpRT8bp8Ydu2Bstc= +github.com/xhit/go-str2duration/v2 v2.1.0/go.mod h1:ohY8p+0f07DiV6Em5LKB0s2YpLtXVyJfNt1+BlmyAsU= github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e 
h1:JVG44RsyaB9T2KIHavMF/ppJZNG9ZpyihvCd0w101no= github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e/go.mod h1:RbqR21r5mrJuqunuUZ/Dhy/avygyECGrLceyNeo4LiM= @@ -948,6 +957,8 @@ go.yaml.in/yaml/v2 v2.4.4 h1:tuyd0P+2Ont/d6e2rl3be67goVK4R6deVxCUX5vyPaQ= go.yaml.in/yaml/v2 v2.4.4/go.mod h1:gMZqIpDtDqOfM0uNfy0SkpRhvUryYH0Z6wdMYcacYXQ= go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc= go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg= +go.yaml.in/yaml/v4 v4.0.0-rc.3 h1:3h1fjsh1CTAPjW7q/EMe+C8shx5d8ctzZTrLcs/j8Go= +go.yaml.in/yaml/v4 v4.0.0-rc.3/go.mod h1:aZqd9kCMsGL7AuUv/m/PvWLdg5sjJsZ4oHDEnfPPfY0= golang.org/x/crypto v0.0.0-20171113213409-9f005a07e0d3/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= golang.org/x/crypto v0.0.0-20181009213950-7c1a557ab941/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1062,6 +1073,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.20.0 h1:e0PTpb7pjO8GAtTs2dQ6jYa5BWYlMuX047Dco/pItO4= +golang.org/x/sync v0.20.0/go.mod h1:9xrNwdLfx4jkKbNva9FpL6vEN7evnE43NNNJQ2LF3+0= golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= diff --git a/pkg/app/app.go b/pkg/app/app.go index 2b723f9f..4755a9f4 100644 --- a/pkg/app/app.go +++ 
b/pkg/app/app.go @@ -18,10 +18,10 @@ import ( "coopcloud.tech/abra/pkg/recipe" "coopcloud.tech/abra/pkg/upstream/convert" "coopcloud.tech/abra/pkg/upstream/stack" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" "coopcloud.tech/abra/pkg/log" loader "coopcloud.tech/abra/pkg/upstream/stack" - composetypes "github.com/docker/cli/cli/compose/types" "github.com/docker/docker/api/types/filters" "github.com/schollz/progressbar/v3" ) @@ -179,8 +179,7 @@ func (a App) Filters(appendServiceNames, exactMatch bool, services ...string) (f return filters, err } - opts := stack.Deploy{Composefiles: composeFiles} - compose, err := GetAppComposeConfig(a.Recipe.Name, opts, a.Env) + compose, err := GetAppComposeConfig(composeFiles, a.Env) if err != nil { return filters, err } @@ -333,8 +332,7 @@ func GetAppServiceNames(appName string) ([]string, error) { return serviceNames, err } - opts := stack.Deploy{Composefiles: composeFiles} - compose, err := GetAppComposeConfig(app.Recipe.Name, opts, app.Env) + compose, err := GetAppComposeConfig(composeFiles, app.Env) if err != nil { return serviceNames, err } @@ -490,13 +488,18 @@ func GetAppStatuses(apps []App, MachineReadable bool) (map[string]map[string]str // GetAppComposeConfig retrieves a compose specification for a recipe. This // specification is the result of a merge of all the compose.**.yml files in // the recipe repository. 
-func GetAppComposeConfig(recipe string, opts stack.Deploy, appEnv envfile.AppEnv) (*composetypes.Config, error) { - compose, err := loader.LoadComposefile(opts, appEnv) +func GetAppComposeConfig(composeFiles []string, appEnv envfile.AppEnv) (*composeGoTypes.Project, error) { + compose, err := loader.LoadCompose(loader.LoadConf{ComposeFiles: composeFiles, AppEnv: appEnv}) if err != nil { - return &composetypes.Config{}, err + return &composeGoTypes.Project{}, err } - log.Debug(i18n.G("retrieved %s for %s", compose.Filename, recipe)) + recipeName, exists := appEnv["RECIPE"] + if !exists { + recipeName, _ = appEnv["TYPE"] + } + + log.Debug(i18n.G("retrieved %s for %s", compose.Name, recipeName)) return compose, nil } @@ -504,7 +507,7 @@ func GetAppComposeConfig(recipe string, opts stack.Deploy, appEnv envfile.AppEnv // ExposeAllEnv exposes all env variables to the app container func ExposeAllEnv( stackName string, - compose *composetypes.Config, + compose *composeGoTypes.Project, appEnv envfile.AppEnv, toDeployVersion string) { for _, service := range compose.Services { diff --git a/pkg/app/compose.go b/pkg/app/compose.go index 82066c4c..d5341d89 100644 --- a/pkg/app/compose.go +++ b/pkg/app/compose.go @@ -7,12 +7,12 @@ import ( "coopcloud.tech/abra/pkg/i18n" "coopcloud.tech/abra/pkg/log" - composetypes "github.com/docker/cli/cli/compose/types" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" ) // SetRecipeLabel adds the label 'coop-cloud.${STACK_NAME}.recipe=${RECIPE}' to the app container // to signal which recipe is connected to the deployed app -func SetRecipeLabel(compose *composetypes.Config, stackName string, recipe string) { +func SetRecipeLabel(compose *composeGoTypes.Project, stackName string, recipe string) { for _, service := range compose.Services { if service.Name == "app" { log.Debug(i18n.G("set recipe label 'coop-cloud.%s.recipe' to %s for %s", stackName, recipe, stackName)) @@ -24,7 +24,7 @@ func SetRecipeLabel(compose 
*composetypes.Config, stackName string, recipe strin // SetChaosLabel adds the label 'coop-cloud.${STACK_NAME}.chaos=true/false' to the app container // to signal if the app is deployed in chaos mode -func SetChaosLabel(compose *composetypes.Config, stackName string, chaos bool) { +func SetChaosLabel(compose *composeGoTypes.Project, stackName string, chaos bool) { for _, service := range compose.Services { if service.Name == "app" { log.Debug(i18n.G("set label 'coop-cloud.%s.chaos' to %v for %s", stackName, chaos, stackName)) @@ -35,7 +35,7 @@ func SetChaosLabel(compose *composetypes.Config, stackName string, chaos bool) { } // SetChaosVersionLabel adds the label 'coop-cloud.${STACK_NAME}.chaos-version=$(GIT_COMMIT)' to the app container -func SetChaosVersionLabel(compose *composetypes.Config, stackName string, chaosVersion string) { +func SetChaosVersionLabel(compose *composeGoTypes.Project, stackName string, chaosVersion string) { for _, service := range compose.Services { if service.Name == "app" { log.Debug(i18n.G("set label 'coop-cloud.%s.chaos-version' to %v for %s", stackName, chaosVersion, stackName)) @@ -45,7 +45,7 @@ func SetChaosVersionLabel(compose *composetypes.Config, stackName string, chaosV } } -func SetVersionLabel(compose *composetypes.Config, stackName string, version string) { +func SetVersionLabel(compose *composeGoTypes.Project, stackName string, version string) { for _, service := range compose.Services { if service.Name == "app" { log.Debug(i18n.G("set label 'coop-cloud.%s.version' to %v for %s", stackName, version, stackName)) @@ -56,7 +56,7 @@ func SetVersionLabel(compose *composetypes.Config, stackName string, version str } // GetLabel reads docker labels in the format of "coop-cloud.${STACK_NAME}.${LABEL}" from the local compose files -func GetLabel(compose *composetypes.Config, stackName string, label string) string { +func GetLabel(compose *composeGoTypes.Project, stackName string, label string) string { for _, service := range 
compose.Services { if service.Name == "app" { labelKey := fmt.Sprintf("coop-cloud.%s.%s", stackName, label) @@ -73,7 +73,7 @@ func GetLabel(compose *composetypes.Config, stackName string, label string) stri // GetTimeoutFromLabel reads the timeout value from docker label // `coop-cloud.${STACK_NAME}.timeout=...` if present. A value is present if the // operator uses a `TIMEOUT=...` in their app env. -func GetTimeoutFromLabel(compose *composetypes.Config, stackName string) (int, error) { +func GetTimeoutFromLabel(compose *composeGoTypes.Project, stackName string) (int, error) { var timeout int if timeoutLabel := GetLabel(compose, stackName, "timeout"); timeoutLabel != "" { diff --git a/pkg/app/compose_test.go b/pkg/app/compose_test.go index 81eeee36..a7684989 100644 --- a/pkg/app/compose_test.go +++ b/pkg/app/compose_test.go @@ -6,7 +6,6 @@ import ( appPkg "coopcloud.tech/abra/pkg/app" "coopcloud.tech/abra/pkg/test" testPkg "coopcloud.tech/abra/pkg/test" - stack "coopcloud.tech/abra/pkg/upstream/stack" "github.com/stretchr/testify/assert" ) @@ -40,15 +39,8 @@ func TestGetTimeoutFromLabel(t *testing.T) { t.Fatal(err) } - deployOpts := stack.Deploy{ - Composefiles: composeFiles, - Namespace: app.StackName(), - Prune: false, - ResolveImage: stack.ResolveImageAlways, - Detach: false, - } - - compose, err := appPkg.GetAppComposeConfig(app.Name, deployOpts, app.Env) + app.Env["STACK_NAME"] = app.StackName() + compose, err := appPkg.GetAppComposeConfig(composeFiles, app.Env) if err != nil { t.Fatal(err) } diff --git a/pkg/autocomplete/autocomplete.go b/pkg/autocomplete/autocomplete.go index ace0ac71..df7ca804 100644 --- a/pkg/autocomplete/autocomplete.go +++ b/pkg/autocomplete/autocomplete.go @@ -120,7 +120,7 @@ func CommandNameComplete(appName string) ([]string, cobra.ShellCompDirective) { func SecretComplete(recipeName string) ([]string, cobra.ShellCompDirective) { r := recipe.Get(recipeName) - config, err := r.GetComposeConfig(nil) + config, err := r.GetComposeConfig() 
if err != nil { err := i18n.G("autocomplete failed: %s", err) return []string{err}, cobra.ShellCompDirectiveError diff --git a/pkg/deploy/utils.go b/pkg/deploy/utils.go index 284e2bc1..c4599234 100644 --- a/pkg/deploy/utils.go +++ b/pkg/deploy/utils.go @@ -14,8 +14,8 @@ import ( "coopcloud.tech/abra/pkg/recipe" "coopcloud.tech/abra/pkg/secret" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" "github.com/distribution/reference" - composetypes "github.com/docker/cli/cli/compose/types" "github.com/docker/docker/api/types/swarm" dockerClient "github.com/docker/docker/client" ) @@ -229,7 +229,7 @@ func GatherSecretsForDeploy(cl *dockerClient.Client, app appPkg.App, showUnchang return secretInfo, nil } -func GatherConfigsForDeploy(cl *dockerClient.Client, app appPkg.App, compose *composetypes.Config, abraShEnv map[string]string, showUnchanged bool) ([]string, error) { +func GatherConfigsForDeploy(cl *dockerClient.Client, app appPkg.App, compose *composeGoTypes.Project, abraShEnv map[string]string, showUnchanged bool) ([]string, error) { // Get current configs from existing deployment currentConfigs, err := GetConfigsForStack(cl, app) if err != nil { @@ -268,7 +268,7 @@ func GatherConfigsForDeploy(cl *dockerClient.Client, app appPkg.App, compose *co return configInfo, nil } -func GatherImagesForDeploy(cl *dockerClient.Client, app appPkg.App, compose *composetypes.Config, showUnchanged bool) ([]string, error) { +func GatherImagesForDeploy(cl *dockerClient.Client, app appPkg.App, compose *composeGoTypes.Project, showUnchanged bool) ([]string, error) { // Get current images from existing deployment currentImages, err := GetImagesForStack(cl, app) if err != nil { diff --git a/pkg/lint/recipe.go b/pkg/lint/recipe.go index e81c288c..0b2ef352 100644 --- a/pkg/lint/recipe.go +++ b/pkg/lint/recipe.go @@ -62,13 +62,6 @@ func (l LintRule) Skip(recipe recipe.Recipe) bool { var LintRules = map[string][]LintRule{ "warn": { - { - Ref: "R001", - Level: i18n.G("warn"), - 
Description: i18n.G("compose config has expected version"), - HowToResolve: i18n.G("ensure 'version: \"3.8\"' in compose configs"), - Function: LintComposeVersion, - }, { Ref: "R002", Level: i18n.G("warn"), @@ -217,18 +210,6 @@ func LintForErrors(recipe recipe.Recipe) error { return nil } -func LintComposeVersion(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) - if err != nil { - return false, err - } - if config.Version == "3.8" { - return true, nil - } - - return true, nil -} - func LintEnvConfigPresent(r recipe.Recipe) (bool, error) { if _, err := os.Stat(r.SampleEnvPath); !os.IsNotExist(err) { return true, nil @@ -238,7 +219,7 @@ func LintEnvConfigPresent(r recipe.Recipe) (bool, error) { } func LintAppService(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return false, err } @@ -269,7 +250,7 @@ func LintTraefikEnabledSkipCondition(r recipe.Recipe) (bool, error) { } func LintTraefikEnabled(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return false, err } @@ -287,7 +268,7 @@ func LintTraefikEnabled(recipe recipe.Recipe) (bool, error) { } func LintDeployLabelsPresent(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return false, err } @@ -302,7 +283,7 @@ func LintDeployLabelsPresent(recipe recipe.Recipe) (bool, error) { } func LintHealthchecks(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return false, err } @@ -316,7 +297,7 @@ func LintHealthchecks(recipe recipe.Recipe) (bool, error) { } func LintAllImagesTagged(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil 
{ return false, err } @@ -334,7 +315,7 @@ func LintAllImagesTagged(recipe recipe.Recipe) (bool, error) { } func LintNoUnstableTags(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return false, err } @@ -361,7 +342,7 @@ func LintNoUnstableTags(recipe recipe.Recipe) (bool, error) { } func LintSemverLikeTags(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return false, err } @@ -388,7 +369,7 @@ func LintSemverLikeTags(recipe recipe.Recipe) (bool, error) { } func LintImagePresent(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return false, err } @@ -440,7 +421,7 @@ func LintMetadataFilledIn(r recipe.Recipe) (bool, error) { } func LintAbraShVendors(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return false, err } @@ -472,7 +453,7 @@ func LintHasRecipeRepo(recipe recipe.Recipe) (bool, error) { } func LintSecretLengths(recipe recipe.Recipe) (bool, error) { - config, err := recipe.GetComposeConfig(nil) + config, err := recipe.GetComposeConfig() if err != nil { return false, err } diff --git a/pkg/recipe/compose.go b/pkg/recipe/compose.go index 6faea26c..ce57519c 100644 --- a/pkg/recipe/compose.go +++ b/pkg/recipe/compose.go @@ -11,10 +11,9 @@ import ( "coopcloud.tech/abra/pkg/formatter" "coopcloud.tech/abra/pkg/i18n" "coopcloud.tech/abra/pkg/log" - "coopcloud.tech/abra/pkg/upstream/stack" loader "coopcloud.tech/abra/pkg/upstream/stack" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" "github.com/distribution/reference" - composetypes "github.com/docker/cli/cli/compose/types" ) // GetComposeFiles gets the list of compose files for an app (or recipe if you @@ -61,7 +60,7 @@ func (r 
Recipe) GetComposeFiles(appEnv map[string]string) ([]string, error) { return composeFiles, nil } -func (r Recipe) GetComposeConfig(env map[string]string) (*composetypes.Config, error) { +func (r Recipe) GetComposeConfig() (*composeGoTypes.Project, error) { pattern := fmt.Sprintf("%s/compose**yml", r.Dir) composeFiles, err := filepath.Glob(pattern) if err != nil { @@ -72,25 +71,18 @@ func (r Recipe) GetComposeConfig(env map[string]string) (*composetypes.Config, e return nil, errors.New(i18n.G("%s is missing a compose.yml or compose.*.yml file?", r.Name)) } - if env == nil { - env, err = r.SampleEnv() - if err != nil { - return nil, err - } - } - - opts := stack.Deploy{Composefiles: composeFiles} - config, err := loader.LoadComposefile(opts, env) + config, err := loader.LoadCompose(loader.LoadConf{ComposeFiles: composeFiles}) if err != nil { return nil, err } + return config, nil } // GetVersionLabelLocal retrieves the version label on the local recipe config func (r Recipe) GetVersionLabelLocal() (string, error) { var label string - config, err := r.GetComposeConfig(nil) + config, err := r.GetComposeConfig() if err != nil { return "", err } @@ -123,14 +115,7 @@ func (r Recipe) UpdateTag(image, tag string) (bool, error) { log.Debug(i18n.G("considering %s config(s) for tag update", strings.Join(composeFiles, ", "))) for _, composeFile := range composeFiles { - opts := stack.Deploy{Composefiles: []string{composeFile}} - - sampleEnv, err := r.SampleEnv() - if err != nil { - return false, err - } - - compose, err := loader.LoadComposefile(opts, sampleEnv) + compose, err := loader.LoadCompose(loader.LoadConf{ComposeFiles: []string{composeFile}}) if err != nil { return false, err } @@ -168,9 +153,9 @@ func (r Recipe) UpdateTag(image, tag string) (bool, error) { new := fmt.Sprintf("%s:%s", composeImage, tag) replacedBytes := strings.Replace(string(bytes), old, new, -1) - log.Debug(i18n.G("updating %s to %s in %s", old, new, compose.Filename)) + log.Debug(i18n.G("updating %s 
to %s in %s", old, new, compose.Name)) - if err := os.WriteFile(compose.Filename, []byte(replacedBytes), 0o764); err != nil { + if err := os.WriteFile(compose.Name, []byte(replacedBytes), 0o764); err != nil { return false, err } } @@ -191,20 +176,13 @@ func (r Recipe) UpdateLabel(pattern, serviceName, label string) error { log.Debug(i18n.G("considering %s config(s) for label update", strings.Join(composeFiles, ", "))) for _, composeFile := range composeFiles { - opts := stack.Deploy{Composefiles: []string{composeFile}} - - sampleEnv, err := r.SampleEnv() - if err != nil { - return err - } - - compose, err := loader.LoadComposefile(opts, sampleEnv) + compose, err := loader.LoadCompose(loader.LoadConf{ComposeFiles: []string{composeFile}}) if err != nil { return err } serviceExists := false - var service composetypes.ServiceConfig + var service composeGoTypes.ServiceConfig for _, s := range compose.Services { if s.Name == serviceName { service = s @@ -234,9 +212,9 @@ func (r Recipe) UpdateLabel(pattern, serviceName, label string) error { return nil } - log.Debug(i18n.G("updating %s to %s in %s", old, label, compose.Filename)) + log.Debug(i18n.G("updating %s to %s in %s", old, label, compose.Name)) - if err := ioutil.WriteFile(compose.Filename, []byte(replacedBytes), 0o764); err != nil { + if err := ioutil.WriteFile(compose.Name, []byte(replacedBytes), 0o764); err != nil { return err } diff --git a/pkg/recipe/git.go b/pkg/recipe/git.go index eff8f5f8..f6a95f10 100644 --- a/pkg/recipe/git.go +++ b/pkg/recipe/git.go @@ -416,7 +416,7 @@ func (r Recipe) GetRecipeVersions() (RecipeVersions, []string, error) { log.Debug(i18n.G("git checkout: %s in %s", ref.Name(), r.Dir)) - config, err := r.GetComposeConfig(nil) + config, err := r.GetComposeConfig() if err != nil { log.Debug(i18n.G("failed to get compose config for %s: %s", tag, err)) warnMsg = append(warnMsg, i18n.G("skipping tag %s: invalid compose config: %s", tag, err)) diff --git a/pkg/secret/secret.go 
b/pkg/secret/secret.go index 4d6833f9..8f9adeb3 100644 --- a/pkg/secret/secret.go +++ b/pkg/secret/secret.go @@ -20,7 +20,6 @@ import ( "coopcloud.tech/abra/pkg/envfile" "coopcloud.tech/abra/pkg/i18n" "coopcloud.tech/abra/pkg/log" - "coopcloud.tech/abra/pkg/upstream/stack" loader "coopcloud.tech/abra/pkg/upstream/stack" "github.com/decentral1se/passgen" "github.com/docker/docker/api/types" @@ -122,14 +121,13 @@ func ReadSecretsConfig(appEnvPath string, composeFiles []string, stackName strin // Set the STACK_NAME to be able to generate the remote name correctly. appEnv["STACK_NAME"] = stackName - opts := stack.Deploy{Composefiles: composeFiles} - composeConfig, err := loader.LoadComposefile(opts, appEnv) + composeConfig, err := loader.LoadCompose(loader.LoadConf{ComposeFiles: composeFiles, AppEnv: appEnv}) if err != nil { return nil, err } // Read the compose files without injecting environment variables. - configWithoutEnv, err := loader.LoadComposefile(opts, map[string]string{}, loader.SkipInterpolation) + configWithoutEnv, err := loader.LoadCompose(loader.LoadConf{ComposeFiles: composeFiles}) if err != nil { return nil, err } diff --git a/pkg/test/test.go b/pkg/test/test.go index 302934f6..ba1141b9 100644 --- a/pkg/test/test.go +++ b/pkg/test/test.go @@ -5,6 +5,8 @@ import ( "log" "os" "path" + "path/filepath" + "runtime" gitPkg "coopcloud.tech/abra/pkg/git" "git.coopcloud.tech/toolshed/godotenv" @@ -12,6 +14,7 @@ import ( var ( AppName = "test_app.example.com" + StackName = "test_app_example_com" ServerName = "test_server" RecipeName = "test_recipe" @@ -59,13 +62,19 @@ func Setup() { } } - serverSrcDir := os.ExpandEnv("$PWD/../../tests/resources/test_server") + _, f, _, ok := runtime.Caller(0) + if !ok { + log.Fatal("Setup: unable to discover current working directory of file") + } + pwd := filepath.Dir(f) + + serverSrcDir := filepath.Join(pwd, "/../../tests/resources/test_server") serverDestDir := os.ExpandEnv("$ABRA_DIR/servers/test_server") if err := 
os.CopyFS(serverDestDir, os.DirFS(serverSrcDir)); err != nil { log.Fatal(err) } - recipeSrcDir := os.ExpandEnv("$PWD/../../tests/resources/test_recipe") + recipeSrcDir := filepath.Join(pwd, "/../../tests/resources/test_recipe") recipeDestDir := os.ExpandEnv("$ABRA_DIR/recipes/test_recipe") if err := os.CopyFS(recipeDestDir, os.DirFS(recipeSrcDir)); err != nil { log.Fatal(err) diff --git a/pkg/upstream/convert/compose.go b/pkg/upstream/convert/compose.go index af3ca930..480b2ef8 100644 --- a/pkg/upstream/convert/compose.go +++ b/pkg/upstream/convert/compose.go @@ -4,7 +4,7 @@ import ( "io/ioutil" "strings" - composetypes "github.com/docker/cli/cli/compose/types" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" networktypes "github.com/docker/docker/api/types/network" "github.com/docker/docker/api/types/swarm" ) @@ -48,19 +48,17 @@ func AddStackLabel(namespace Namespace, labels map[string]string) map[string]str return labels } -type networkMap map[string]composetypes.NetworkConfig - // Networks from the compose-file type to the engine API type -func Networks(namespace Namespace, networks networkMap, servicesNetworks map[string]struct{}) (map[string]networktypes.CreateOptions, []string) { +func Networks(namespace Namespace, networks map[string]composeGoTypes.NetworkConfig, servicesNetworks map[string]struct{}) (map[string]networktypes.CreateOptions, []string) { if networks == nil { - networks = make(map[string]composetypes.NetworkConfig) + networks = make(map[string]composeGoTypes.NetworkConfig) } externalNetworks := []string{} result := make(map[string]networktypes.CreateOptions) for internalName := range servicesNetworks { network := networks[internalName] - if network.External.External { + if network.External { externalNetworks = append(externalNetworks, network.Name) continue } @@ -98,19 +96,19 @@ func Networks(namespace Namespace, networks networkMap, servicesNetworks map[str } // Secrets converts secrets from the Compose type to the engine API type 
-func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) ([]swarm.SecretSpec, error) { +func Secrets(namespace Namespace, secrets map[string]composeGoTypes.SecretConfig) ([]swarm.SecretSpec, error) { result := []swarm.SecretSpec{} for name, secret := range secrets { - if secret.External.External { + if secret.External { continue } var obj swarmFileObject var err error if secret.Driver != "" { - obj = driverObjectConfig(namespace, name, composetypes.FileObjectConfig(secret)) + obj = driverObjectConfig(namespace, name, composeGoTypes.FileObjectConfig(secret)) } else { - obj, err = fileObjectConfig(namespace, name, composetypes.FileObjectConfig(secret)) + obj, err = fileObjectConfig(namespace, name, composeGoTypes.FileObjectConfig(secret)) } if err != nil { return nil, err @@ -133,14 +131,14 @@ func Secrets(namespace Namespace, secrets map[string]composetypes.SecretConfig) } // Configs converts config objects from the Compose type to the engine API type -func Configs(namespace Namespace, configs map[string]composetypes.ConfigObjConfig) ([]swarm.ConfigSpec, error) { +func Configs(namespace Namespace, configs map[string]composeGoTypes.ConfigObjConfig) ([]swarm.ConfigSpec, error) { result := []swarm.ConfigSpec{} for name, config := range configs { - if config.External.External { + if config.External { continue } - obj, err := fileObjectConfig(namespace, name, composetypes.FileObjectConfig(config)) + obj, err := fileObjectConfig(namespace, name, composeGoTypes.FileObjectConfig(config)) if err != nil { return nil, err } @@ -160,7 +158,7 @@ type swarmFileObject struct { Data []byte } -func driverObjectConfig(namespace Namespace, name string, obj composetypes.FileObjectConfig) swarmFileObject { +func driverObjectConfig(namespace Namespace, name string, obj composeGoTypes.FileObjectConfig) swarmFileObject { if obj.Name != "" { name = obj.Name } else { @@ -176,7 +174,7 @@ func driverObjectConfig(namespace Namespace, name string, obj composetypes.FileO } 
} -func fileObjectConfig(namespace Namespace, name string, obj composetypes.FileObjectConfig) (swarmFileObject, error) { +func fileObjectConfig(namespace Namespace, name string, obj composeGoTypes.FileObjectConfig) (swarmFileObject, error) { data, err := ioutil.ReadFile(obj.File) if err != nil { return swarmFileObject{}, err diff --git a/pkg/upstream/convert/compose_test.go b/pkg/upstream/convert/compose_test.go deleted file mode 100644 index c13783ea..00000000 --- a/pkg/upstream/convert/compose_test.go +++ /dev/null @@ -1,170 +0,0 @@ -package convert // https://github.com/docker/cli/blob/master/cli/compose/convert/compose_test.go - -import ( - "testing" - - composetypes "github.com/docker/cli/cli/compose/types" - "github.com/docker/docker/api/types/network" - "gotest.tools/v3/assert" - is "gotest.tools/v3/assert/cmp" - "gotest.tools/v3/fs" -) - -func TestNamespaceScope(t *testing.T) { - scoped := Namespace{name: "foo"}.Scope("bar") - assert.Check(t, is.Equal("foo_bar", scoped)) -} - -func TestAddStackLabel(t *testing.T) { - labels := map[string]string{ - "something": "labeled", - } - actual := AddStackLabel(Namespace{name: "foo"}, labels) - expected := map[string]string{ - "something": "labeled", - LabelNamespace: "foo", - } - assert.Check(t, is.DeepEqual(expected, actual)) -} - -func TestNetworks(t *testing.T) { - namespace := Namespace{name: "foo"} - serviceNetworks := map[string]struct{}{ - "normal": {}, - "outside": {}, - "default": {}, - "attachablenet": {}, - "named": {}, - } - source := networkMap{ - "normal": composetypes.NetworkConfig{ - Driver: "overlay", - DriverOpts: map[string]string{ - "opt": "value", - }, - Ipam: composetypes.IPAMConfig{ - Driver: "driver", - Config: []*composetypes.IPAMPool{ - { - Subnet: "10.0.0.0", - }, - }, - }, - Labels: map[string]string{ - "something": "labeled", - }, - }, - "outside": composetypes.NetworkConfig{ - External: composetypes.External{External: true}, - Name: "special", - }, - "attachablenet": 
composetypes.NetworkConfig{ - Driver: "overlay", - Attachable: true, - }, - "named": composetypes.NetworkConfig{ - Name: "othername", - }, - } - expected := map[string]network.CreateOptions{ - "foo_default": { - Labels: map[string]string{ - LabelNamespace: "foo", - }, - }, - "foo_normal": { - Driver: "overlay", - IPAM: &network.IPAM{ - Driver: "driver", - Config: []network.IPAMConfig{ - { - Subnet: "10.0.0.0", - }, - }, - }, - Options: map[string]string{ - "opt": "value", - }, - Labels: map[string]string{ - LabelNamespace: "foo", - "something": "labeled", - }, - }, - "foo_attachablenet": { - Driver: "overlay", - Attachable: true, - Labels: map[string]string{ - LabelNamespace: "foo", - }, - }, - "othername": { - Labels: map[string]string{LabelNamespace: "foo"}, - }, - } - - networks, externals := Networks(namespace, source, serviceNetworks) - assert.DeepEqual(t, expected, networks) - assert.DeepEqual(t, []string{"special"}, externals) -} - -func TestSecrets(t *testing.T) { - namespace := Namespace{name: "foo"} - - secretText := "this is the first secret" - secretFile := fs.NewFile(t, "convert-secrets", fs.WithContent(secretText)) - defer secretFile.Remove() - - source := map[string]composetypes.SecretConfig{ - "one": { - File: secretFile.Path(), - Labels: map[string]string{"monster": "mash"}, - }, - "ext": { - External: composetypes.External{ - External: true, - }, - }, - } - - specs, err := Secrets(namespace, source) - assert.NilError(t, err) - assert.Assert(t, is.Len(specs, 1)) - secret := specs[0] - assert.Check(t, is.Equal("foo_one", secret.Name)) - assert.Check(t, is.DeepEqual(map[string]string{ - "monster": "mash", - LabelNamespace: "foo", - }, secret.Labels)) - assert.Check(t, is.DeepEqual([]byte(secretText), secret.Data)) -} - -func TestConfigs(t *testing.T) { - namespace := Namespace{name: "foo"} - - configText := "this is the first config" - configFile := fs.NewFile(t, "convert-configs", fs.WithContent(configText)) - defer configFile.Remove() - - source := 
map[string]composetypes.ConfigObjConfig{ - "one": { - File: configFile.Path(), - Labels: map[string]string{"monster": "mash"}, - }, - "ext": { - External: composetypes.External{ - External: true, - }, - }, - } - - specs, err := Configs(namespace, source) - assert.NilError(t, err) - assert.Assert(t, is.Len(specs, 1)) - config := specs[0] - assert.Check(t, is.Equal("foo_one", config.Name)) - assert.Check(t, is.DeepEqual(map[string]string{ - "monster": "mash", - LabelNamespace: "foo", - }, config.Labels)) - assert.Check(t, is.DeepEqual([]byte(configText), config.Data)) -} diff --git a/pkg/upstream/convert/service.go b/pkg/upstream/convert/service.go index 035d343e..e9a7da08 100644 --- a/pkg/upstream/convert/service.go +++ b/pkg/upstream/convert/service.go @@ -5,11 +5,13 @@ import ( "fmt" "os" "sort" + "strconv" "strings" "time" + "unsafe" "coopcloud.tech/abra/pkg/i18n" - composetypes "github.com/docker/cli/cli/compose/types" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/cli/opts" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" @@ -178,7 +180,7 @@ func ParseConfigs(client client.ConfigAPIClient, requestedConfigs []*swarmtypes. 
// Services from compose-file types to engine API types func Services( namespace Namespace, - config *composetypes.Config, + config *composeGoTypes.Project, client client.CommonAPIClient, ) (map[string]swarm.ServiceSpec, error) { result := make(map[string]swarm.ServiceSpec) @@ -211,14 +213,17 @@ func Services( func Service( apiVersion string, namespace Namespace, - service composetypes.ServiceConfig, - networkConfigs map[string]composetypes.NetworkConfig, - volumes map[string]composetypes.VolumeConfig, + service composeGoTypes.ServiceConfig, + networkConfigs map[string]composeGoTypes.NetworkConfig, + volumes map[string]composeGoTypes.VolumeConfig, secrets []*swarm.SecretReference, configs []*swarm.ConfigReference, ) (swarm.ServiceSpec, error) { name := namespace.Scope(service.Name) - endpoint := convertEndpointSpec(service.Deploy.EndpointMode, service.Ports) + endpoint, err := convertEndpointSpec(service.Deploy.EndpointMode, service.Ports) + if err != nil { + return swarm.ServiceSpec{}, err + } mode, err := convertDeployMode(service.Deploy.Mode, service.Deploy.Replicas) if err != nil { @@ -254,9 +259,16 @@ func Service( dnsConfig := convertDNSConfig(service.DNS, service.DNSSearch) var privileges swarm.Privileges + + credSpec := service.CredentialSpec + if credSpec == nil { + credSpec = &composeGoTypes.CredentialSpecConfig{} + } + privileges.CredentialSpec, err = convertCredentialSpec( - namespace, service.CredentialSpec, configs, + namespace, *credSpec, configs, ) + if err != nil { return swarm.ServiceSpec{}, err } @@ -271,6 +283,11 @@ func Service( capAdd, capDrop := opts.EffectiveCapAddCapDrop(service.CapAdd, service.CapDrop) + var stopGracePtr time.Duration + if service.StopGracePeriod != nil { + stopGracePtr = time.Duration(*service.StopGracePeriod) + } + serviceSpec := swarm.ServiceSpec{ Annotations: swarm.Annotations{ Name: name, @@ -290,7 +307,7 @@ func Service( Dir: service.WorkingDir, User: service.User, Mounts: mounts, - StopGracePeriod: 
composetypes.ConvertDurationPtr(service.StopGracePeriod), + StopGracePeriod: &stopGracePtr, StopSignal: service.StopSignal, TTY: service.Tty, OpenStdin: service.StdinOpen, @@ -338,7 +355,7 @@ func Service( return serviceSpec, nil } -func getPlacementPreference(preferences []composetypes.PlacementPreferences) []swarm.PlacementPreference { +func getPlacementPreference(preferences []composeGoTypes.PlacementPreferences) []swarm.PlacementPreference { result := []swarm.PlacementPreference{} for _, preference := range preferences { spreadDescriptor := preference.Spread @@ -357,13 +374,13 @@ func sortStrings(strs []string) []string { } func convertServiceNetworks( - networks map[string]*composetypes.ServiceNetworkConfig, - networkConfigs networkMap, + networks map[string]*composeGoTypes.ServiceNetworkConfig, + networkConfigs map[string]composeGoTypes.NetworkConfig, namespace Namespace, name string, ) ([]swarm.NetworkAttachmentConfig, error) { if len(networks) == 0 { - networks = map[string]*composetypes.ServiceNetworkConfig{ + networks = map[string]*composeGoTypes.ServiceNetworkConfig{ defaultNetwork: {}, } } @@ -403,20 +420,20 @@ func convertServiceNetworks( func convertServiceSecrets( client client.SecretAPIClient, namespace Namespace, - secrets []composetypes.ServiceSecretConfig, - secretSpecs map[string]composetypes.SecretConfig, + secrets []composeGoTypes.ServiceSecretConfig, + secretSpecs map[string]composeGoTypes.SecretConfig, ) ([]*swarm.SecretReference, error) { refs := []*swarm.SecretReference{} - lookup := func(key string) (composetypes.FileObjectConfig, error) { + lookup := func(key string) (composeGoTypes.FileObjectConfig, error) { secretSpec, exists := secretSpecs[key] if !exists { - return composetypes.FileObjectConfig{}, errors.New(i18n.G("undefined secret %q", key)) + return composeGoTypes.FileObjectConfig{}, errors.New(i18n.G("undefined secret %q", key)) } - return composetypes.FileObjectConfig(secretSpec), nil + return 
composeGoTypes.FileObjectConfig(secretSpec), nil } for _, secret := range secrets { - obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(secret), lookup) + obj, err := convertFileObject(namespace, composeGoTypes.FileReferenceConfig(secret), lookup) if err != nil { return nil, err } @@ -451,20 +468,20 @@ func convertServiceSecrets( func convertServiceConfigObjs( client client.ConfigAPIClient, namespace Namespace, - service composetypes.ServiceConfig, - configSpecs map[string]composetypes.ConfigObjConfig, + service composeGoTypes.ServiceConfig, + configSpecs map[string]composeGoTypes.ConfigObjConfig, ) ([]*swarm.ConfigReference, error) { refs := []*swarm.ConfigReference{} - lookup := func(key string) (composetypes.FileObjectConfig, error) { + lookup := func(key string) (composeGoTypes.FileObjectConfig, error) { configSpec, exists := configSpecs[key] if !exists { - return composetypes.FileObjectConfig{}, errors.New(i18n.G("undefined config %q", key)) + return composeGoTypes.FileObjectConfig{}, errors.New(i18n.G("undefined config %q", key)) } - return composetypes.FileObjectConfig(configSpec), nil + return composeGoTypes.FileObjectConfig(configSpec), nil } for _, config := range service.Configs { - obj, err := convertFileObject(namespace, composetypes.FileReferenceConfig(config), lookup) + obj, err := convertFileObject(namespace, composeGoTypes.FileReferenceConfig(config), lookup) if err != nil { return nil, err } @@ -487,7 +504,7 @@ func convertServiceConfigObjs( // if the credSpec uses a config, then we should grab the config name, and // create a config reference for it. A File or Registry-type CredentialSpec // does not need this operation. - if credSpec.Config != "" { + if credSpec != nil && credSpec.Config != "" { // look up the config in the configSpecs. 
obj, err := lookup(credSpec.Config) if err != nil { @@ -532,8 +549,8 @@ type swarmReferenceObject struct { func convertFileObject( namespace Namespace, - config composetypes.FileReferenceConfig, - lookup func(key string) (composetypes.FileObjectConfig, error), + config composeGoTypes.FileReferenceConfig, + lookup func(key string) (composeGoTypes.FileObjectConfig, error), ) (swarmReferenceObject, error) { obj, err := lookup(config.Source) if err != nil { @@ -558,40 +575,37 @@ func convertFileObject( if gid == "" { gid = "0" } - mode := config.Mode - if mode == nil { - mode = uint32Ptr(0444) - } - return swarmReferenceObject{ + ref := swarmReferenceObject{ File: swarmReferenceTarget{ Name: target, UID: uid, GID: gid, - Mode: os.FileMode(*mode), }, Name: source, - }, nil -} + } -func uint32Ptr(value uint32) *uint32 { - return &value + if config.Mode == nil { + defaultMode := 0444 + ref.File.Mode = os.FileMode(defaultMode) + } else { + ref.File.Mode = os.FileMode(*config.Mode) + } + + return ref, nil } // convertExtraHosts converts : mappings to SwarmKit notation: // "IP-address hostname(s)". The original order of mappings is preserved. 
-func convertExtraHosts(extraHosts composetypes.HostsList) []string { +func convertExtraHosts(extraHosts composeGoTypes.HostsList) []string { hosts := []string{} - for _, hostIP := range extraHosts { - if v := strings.SplitN(hostIP, ":", 2); len(v) == 2 { - // Convert to SwarmKit notation: IP-address hostname(s) - hosts = append(hosts, fmt.Sprintf("%s %s", v[1], v[0])) - } + for hostName, hostIP := range extraHosts { + hosts = append(hosts, fmt.Sprintf("%s %s", hostIP, hostName)) } return hosts } -func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container.HealthConfig, error) { +func convertHealthcheck(healthcheck *composeGoTypes.HealthCheckConfig) (*container.HealthConfig, error) { if healthcheck == nil { return nil, nil } @@ -629,7 +643,7 @@ func convertHealthcheck(healthcheck *composetypes.HealthCheckConfig) (*container }, nil } -func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (*swarm.RestartPolicy, error) { +func convertRestartPolicy(restart string, source *composeGoTypes.RestartPolicy) (*swarm.RestartPolicy, error) { if source == nil { policy, err := opts.ParseRestartPolicy(restart) if err != nil { @@ -653,15 +667,25 @@ func convertRestartPolicy(restart string, source *composetypes.RestartPolicy) (* } } + var windowPtr time.Duration + if source.Window != nil { + windowPtr = time.Duration(*source.Window) + } + + var delayPtr time.Duration + if source.Delay != nil { + delayPtr = time.Duration(*source.Delay) + } + return &swarm.RestartPolicy{ Condition: swarm.RestartPolicyCondition(source.Condition), - Delay: composetypes.ConvertDurationPtr(source.Delay), + Delay: &delayPtr, MaxAttempts: source.MaxAttempts, - Window: composetypes.ConvertDurationPtr(source.Window), + Window: &windowPtr, }, nil } -func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig { +func convertUpdateConfig(source *composeGoTypes.UpdateConfig) *swarm.UpdateConfig { if source == nil { return nil } @@ -679,13 +703,13 @@ 
func convertUpdateConfig(source *composetypes.UpdateConfig) *swarm.UpdateConfig } } -func convertResources(source composetypes.Resources) (*swarm.ResourceRequirements, error) { +func convertResources(source composeGoTypes.Resources) (*swarm.ResourceRequirements, error) { resources := &swarm.ResourceRequirements{} var err error if source.Limits != nil { var cpus int64 - if source.Limits.NanoCPUs != "" { - cpus, err = opts.ParseCPUs(source.Limits.NanoCPUs) + if source.Limits.NanoCPUs > 0 { + cpus, err = opts.ParseCPUs(fmt.Sprintf("%f", source.Limits.NanoCPUs)) if err != nil { return nil, err } @@ -698,8 +722,8 @@ func convertResources(source composetypes.Resources) (*swarm.ResourceRequirement } if source.Reservations != nil { var cpus int64 - if source.Reservations.NanoCPUs != "" { - cpus, err = opts.ParseCPUs(source.Reservations.NanoCPUs) + if source.Reservations.NanoCPUs > 0 { + cpus, err = opts.ParseCPUs(fmt.Sprintf("%f", source.Reservations.NanoCPUs)) if err != nil { return nil, err } @@ -728,13 +752,29 @@ func convertResources(source composetypes.Resources) (*swarm.ResourceRequirement return resources, nil } -func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortConfig) *swarm.EndpointSpec { +func str2uint32(s string) (uint32, error) { + var u32 uint32 + + u64, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return u32, err + } + + return uint32(u64), nil +} + +func convertEndpointSpec(endpointMode string, source []composeGoTypes.ServicePortConfig) (*swarm.EndpointSpec, error) { portConfigs := []swarm.PortConfig{} for _, port := range source { + published, err := str2uint32(port.Published) + if err != nil { + return &swarm.EndpointSpec{}, err + } + portConfig := swarm.PortConfig{ Protocol: swarm.PortConfigProtocol(port.Protocol), TargetPort: port.Target, - PublishedPort: port.Published, + PublishedPort: published, PublishMode: swarm.PortConfigPublishMode(port.Mode), } portConfigs = append(portConfigs, portConfig) @@ -747,7 +787,7 
@@ func convertEndpointSpec(endpointMode string, source []composetypes.ServicePortC return &swarm.EndpointSpec{ Mode: swarm.ResolutionMode(strings.ToLower(endpointMode)), Ports: portConfigs, - } + }, nil } func convertEnvironment(source map[string]*string) []string { @@ -765,7 +805,7 @@ func convertEnvironment(source map[string]*string) []string { return output } -func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) { +func convertDeployMode(mode string, replicas *int) (swarm.ServiceMode, error) { serviceMode := swarm.ServiceMode{} switch mode { @@ -775,7 +815,8 @@ func convertDeployMode(mode string, replicas *uint64) (swarm.ServiceMode, error) } serviceMode.Global = &swarm.GlobalService{} case "replicated", "": - serviceMode.Replicated = &swarm.ReplicatedService{Replicas: replicas} + convReplicas := (*uint64)(unsafe.Pointer(replicas)) + serviceMode.Replicated = &swarm.ReplicatedService{Replicas: convReplicas} default: return serviceMode, errors.New(i18n.G("unknown mode: %s", mode)) } @@ -792,7 +833,7 @@ func convertDNSConfig(DNS []string, DNSSearch []string) *swarm.DNSConfig { return nil } -func convertCredentialSpec(namespace Namespace, spec composetypes.CredentialSpecConfig, refs []*swarm.ConfigReference) (*swarm.CredentialSpec, error) { +func convertCredentialSpec(namespace Namespace, spec composeGoTypes.CredentialSpecConfig, refs []*swarm.ConfigReference) (*swarm.CredentialSpec, error) { var o []string // Config was added in API v1.40 @@ -814,7 +855,13 @@ func convertCredentialSpec(namespace Namespace, spec composetypes.CredentialSpec case l > 2: return nil, errors.New(i18n.G("invalid credential spec: cannot specify both %s, and %s", strings.Join(o[:l-1], ", "), o[l-1])) } - swarmCredSpec := swarm.CredentialSpec(spec) + + swarmCredSpec := swarm.CredentialSpec{ + Config: spec.Config, + File: spec.File, + Registry: spec.Registry, + } + // if we're using a swarm Config for the credential spec, over-write it // here with the config ID 
if swarmCredSpec.Config != "" { @@ -836,7 +883,7 @@ func convertCredentialSpec(namespace Namespace, spec composetypes.CredentialSpec return &swarmCredSpec, nil } -func convertUlimits(origUlimits map[string]*composetypes.UlimitsConfig) []*units.Ulimit { +func convertUlimits(origUlimits map[string]*composeGoTypes.UlimitsConfig) []*units.Ulimit { newUlimits := make(map[string]*units.Ulimit) for name, u := range origUlimits { if u.Single != 0 { diff --git a/pkg/upstream/convert/service_test.go b/pkg/upstream/convert/service_test.go deleted file mode 100644 index 1f6d289e..00000000 --- a/pkg/upstream/convert/service_test.go +++ /dev/null @@ -1,678 +0,0 @@ -package convert // https://github.com/docker/cli/blob/master/cli/compose/convert/service_test.go - -import ( - "context" - "os" - "sort" - "strings" - "testing" - "time" - - composetypes "github.com/docker/cli/cli/compose/types" - "github.com/docker/docker/api/types" - "github.com/docker/docker/api/types/container" - "github.com/docker/docker/api/types/swarm" - "github.com/docker/docker/client" - "github.com/pkg/errors" - "gotest.tools/v3/assert" - is "gotest.tools/v3/assert/cmp" -) - -func TestConvertRestartPolicyFromNone(t *testing.T) { - policy, err := convertRestartPolicy("no", nil) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual((*swarm.RestartPolicy)(nil), policy)) -} - -func TestConvertRestartPolicyFromUnknown(t *testing.T) { - _, err := convertRestartPolicy("unknown", nil) - assert.Error(t, err, "unknown restart policy: unknown") -} - -func TestConvertRestartPolicyFromAlways(t *testing.T) { - policy, err := convertRestartPolicy("always", nil) - expected := &swarm.RestartPolicy{ - Condition: swarm.RestartPolicyConditionAny, - } - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, policy)) -} - -func TestConvertRestartPolicyFromFailure(t *testing.T) { - policy, err := convertRestartPolicy("on-failure:4", nil) - attempts := uint64(4) - expected := &swarm.RestartPolicy{ - Condition: 
swarm.RestartPolicyConditionOnFailure, - MaxAttempts: &attempts, - } - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, policy)) -} - -func strPtr(val string) *string { - return &val -} - -func TestConvertEnvironment(t *testing.T) { - source := map[string]*string{ - "foo": strPtr("bar"), - "key": strPtr("value"), - } - env := convertEnvironment(source) - sort.Strings(env) - assert.Check(t, is.DeepEqual([]string{"foo=bar", "key=value"}, env)) -} - -func TestConvertExtraHosts(t *testing.T) { - source := composetypes.HostsList{ - "zulu:127.0.0.2", - "alpha:127.0.0.1", - "zulu:ff02::1", - } - assert.Check(t, is.DeepEqual([]string{"127.0.0.2 zulu", "127.0.0.1 alpha", "ff02::1 zulu"}, convertExtraHosts(source))) -} - -func TestConvertResourcesFull(t *testing.T) { - source := composetypes.Resources{ - Limits: &composetypes.ResourceLimit{ - NanoCPUs: "0.003", - MemoryBytes: composetypes.UnitBytes(300000000), - }, - Reservations: &composetypes.Resource{ - NanoCPUs: "0.002", - MemoryBytes: composetypes.UnitBytes(200000000), - }, - } - resources, err := convertResources(source) - assert.NilError(t, err) - - expected := &swarm.ResourceRequirements{ - Limits: &swarm.Limit{ - NanoCPUs: 3000000, - MemoryBytes: 300000000, - }, - Reservations: &swarm.Resources{ - NanoCPUs: 2000000, - MemoryBytes: 200000000, - }, - } - assert.Check(t, is.DeepEqual(expected, resources)) -} - -func TestConvertResourcesOnlyMemory(t *testing.T) { - source := composetypes.Resources{ - Limits: &composetypes.ResourceLimit{ - MemoryBytes: composetypes.UnitBytes(300000000), - }, - Reservations: &composetypes.Resource{ - MemoryBytes: composetypes.UnitBytes(200000000), - }, - } - resources, err := convertResources(source) - assert.NilError(t, err) - - expected := &swarm.ResourceRequirements{ - Limits: &swarm.Limit{ - MemoryBytes: 300000000, - }, - Reservations: &swarm.Resources{ - MemoryBytes: 200000000, - }, - } - assert.Check(t, is.DeepEqual(expected, resources)) -} - -func 
TestConvertHealthcheck(t *testing.T) { - retries := uint64(10) - timeout := composetypes.Duration(30 * time.Second) - interval := composetypes.Duration(2 * time.Millisecond) - source := &composetypes.HealthCheckConfig{ - Test: []string{"EXEC", "touch", "/foo"}, - Timeout: &timeout, - Interval: &interval, - Retries: &retries, - } - expected := &container.HealthConfig{ - Test: source.Test, - Timeout: time.Duration(timeout), - Interval: time.Duration(interval), - Retries: 10, - } - - healthcheck, err := convertHealthcheck(source) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, healthcheck)) -} - -func TestConvertHealthcheckDisable(t *testing.T) { - source := &composetypes.HealthCheckConfig{Disable: true} - expected := &container.HealthConfig{ - Test: []string{"NONE"}, - } - - healthcheck, err := convertHealthcheck(source) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, healthcheck)) -} - -func TestConvertHealthcheckDisableWithTest(t *testing.T) { - source := &composetypes.HealthCheckConfig{ - Disable: true, - Test: []string{"EXEC", "touch"}, - } - _, err := convertHealthcheck(source) - assert.Error(t, err, "test and disable can't be set at the same time") -} - -func TestConvertEndpointSpec(t *testing.T) { - source := []composetypes.ServicePortConfig{ - { - Protocol: "udp", - Target: 53, - Published: 1053, - Mode: "host", - }, - { - Target: 8080, - Published: 80, - }, - } - endpoint := convertEndpointSpec("vip", source) - - expected := swarm.EndpointSpec{ - Mode: swarm.ResolutionMode(strings.ToLower("vip")), - Ports: []swarm.PortConfig{ - { - TargetPort: 8080, - PublishedPort: 80, - }, - { - Protocol: "udp", - TargetPort: 53, - PublishedPort: 1053, - PublishMode: "host", - }, - }, - } - - assert.Check(t, is.DeepEqual(expected, *endpoint)) -} - -func TestConvertServiceNetworksOnlyDefault(t *testing.T) { - networkConfigs := networkMap{} - - configs, err := convertServiceNetworks( - nil, networkConfigs, NewNamespace("foo"), "service") 
- - expected := []swarm.NetworkAttachmentConfig{ - { - Target: "foo_default", - Aliases: []string{"service"}, - }, - } - - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, configs)) -} - -func TestConvertServiceNetworks(t *testing.T) { - networkConfigs := networkMap{ - "front": composetypes.NetworkConfig{ - External: composetypes.External{External: true}, - Name: "fronttier", - }, - "back": composetypes.NetworkConfig{}, - } - networks := map[string]*composetypes.ServiceNetworkConfig{ - "front": { - Aliases: []string{"something"}, - }, - "back": { - Aliases: []string{"other"}, - }, - } - - configs, err := convertServiceNetworks( - networks, networkConfigs, NewNamespace("foo"), "service") - - expected := []swarm.NetworkAttachmentConfig{ - { - Target: "foo_back", - Aliases: []string{"other", "service"}, - }, - { - Target: "fronttier", - Aliases: []string{"something", "service"}, - }, - } - - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, configs)) -} - -func TestConvertServiceNetworksCustomDefault(t *testing.T) { - networkConfigs := networkMap{ - "default": composetypes.NetworkConfig{ - External: composetypes.External{External: true}, - Name: "custom", - }, - } - networks := map[string]*composetypes.ServiceNetworkConfig{} - - configs, err := convertServiceNetworks( - networks, networkConfigs, NewNamespace("foo"), "service") - - expected := []swarm.NetworkAttachmentConfig{ - { - Target: "custom", - Aliases: []string{"service"}, - }, - } - - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, configs)) -} - -func TestConvertDNSConfigEmpty(t *testing.T) { - dnsConfig := convertDNSConfig(nil, nil) - assert.Check(t, is.DeepEqual((*swarm.DNSConfig)(nil), dnsConfig)) -} - -var ( - nameservers = []string{"8.8.8.8", "9.9.9.9"} - search = []string{"dc1.example.com", "dc2.example.com"} -) - -func TestConvertDNSConfigAll(t *testing.T) { - dnsConfig := convertDNSConfig(nameservers, search) - assert.Check(t, 
is.DeepEqual(&swarm.DNSConfig{ - Nameservers: nameservers, - Search: search, - }, dnsConfig)) -} - -func TestConvertDNSConfigNameservers(t *testing.T) { - dnsConfig := convertDNSConfig(nameservers, nil) - assert.Check(t, is.DeepEqual(&swarm.DNSConfig{ - Nameservers: nameservers, - Search: nil, - }, dnsConfig)) -} - -func TestConvertDNSConfigSearch(t *testing.T) { - dnsConfig := convertDNSConfig(nil, search) - assert.Check(t, is.DeepEqual(&swarm.DNSConfig{ - Nameservers: nil, - Search: search, - }, dnsConfig)) -} - -func TestConvertCredentialSpec(t *testing.T) { - tests := []struct { - name string - in composetypes.CredentialSpecConfig - out *swarm.CredentialSpec - configs []*swarm.ConfigReference - expectedErr string - }{ - { - name: "empty", - }, - { - name: "config-and-file", - in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json"}, - expectedErr: `invalid credential spec: cannot specify both "Config" and "File"`, - }, - { - name: "config-and-registry", - in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", Registry: "testing"}, - expectedErr: `invalid credential spec: cannot specify both "Config" and "Registry"`, - }, - { - name: "file-and-registry", - in: composetypes.CredentialSpecConfig{File: "somefile.json", Registry: "testing"}, - expectedErr: `invalid credential spec: cannot specify both "File" and "Registry"`, - }, - { - name: "config-and-file-and-registry", - in: composetypes.CredentialSpecConfig{Config: "0bt9dmxjvjiqermk6xrop3ekq", File: "somefile.json", Registry: "testing"}, - expectedErr: `invalid credential spec: cannot specify both "Config", "File", and "Registry"`, - }, - { - name: "missing-config-reference", - in: composetypes.CredentialSpecConfig{Config: "missing"}, - expectedErr: "invalid credential spec: spec specifies config missing, but no such config can be found", - configs: []*swarm.ConfigReference{ - { - ConfigName: "someName", - ConfigID: "missing", - }, - }, - }, - { - 
name: "namespaced-config", - in: composetypes.CredentialSpecConfig{Config: "name"}, - configs: []*swarm.ConfigReference{ - { - ConfigName: "namespaced-config_name", - ConfigID: "someID", - }, - }, - out: &swarm.CredentialSpec{Config: "someID"}, - }, - { - name: "config", - in: composetypes.CredentialSpecConfig{Config: "someName"}, - configs: []*swarm.ConfigReference{ - { - ConfigName: "someOtherName", - ConfigID: "someOtherID", - }, { - ConfigName: "someName", - ConfigID: "someID", - }, - }, - out: &swarm.CredentialSpec{Config: "someID"}, - }, - { - name: "file", - in: composetypes.CredentialSpecConfig{File: "somefile.json"}, - out: &swarm.CredentialSpec{File: "somefile.json"}, - }, - { - name: "registry", - in: composetypes.CredentialSpecConfig{Registry: "testing"}, - out: &swarm.CredentialSpec{Registry: "testing"}, - }, - } - - for _, tc := range tests { - tc := tc - t.Run(tc.name, func(t *testing.T) { - namespace := NewNamespace(tc.name) - swarmSpec, err := convertCredentialSpec(namespace, tc.in, tc.configs) - - if tc.expectedErr != "" { - assert.Error(t, err, tc.expectedErr) - } else { - assert.NilError(t, err) - } - assert.DeepEqual(t, swarmSpec, tc.out) - }) - } -} - -func TestConvertUpdateConfigOrder(t *testing.T) { - // test default behavior - updateConfig := convertUpdateConfig(&composetypes.UpdateConfig{}) - assert.Check(t, is.Equal("", updateConfig.Order)) - - // test start-first - updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{ - Order: "start-first", - }) - assert.Check(t, is.Equal(updateConfig.Order, "start-first")) - - // test stop-first - updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{ - Order: "stop-first", - }) - assert.Check(t, is.Equal(updateConfig.Order, "stop-first")) -} - -func TestConvertFileObject(t *testing.T) { - namespace := NewNamespace("testing") - config := composetypes.FileReferenceConfig{ - Source: "source", - Target: "target", - UID: "user", - GID: "group", - Mode: uint32Ptr(0644), - } - swarmRef, err 
:= convertFileObject(namespace, config, lookupConfig) - assert.NilError(t, err) - - expected := swarmReferenceObject{ - Name: "testing_source", - File: swarmReferenceTarget{ - Name: config.Target, - UID: config.UID, - GID: config.GID, - Mode: os.FileMode(0644), - }, - } - assert.Check(t, is.DeepEqual(expected, swarmRef)) -} - -func lookupConfig(key string) (composetypes.FileObjectConfig, error) { - if key != "source" { - return composetypes.FileObjectConfig{}, errors.New("bad key") - } - return composetypes.FileObjectConfig{}, nil -} - -func TestConvertFileObjectDefaults(t *testing.T) { - namespace := NewNamespace("testing") - config := composetypes.FileReferenceConfig{Source: "source"} - swarmRef, err := convertFileObject(namespace, config, lookupConfig) - assert.NilError(t, err) - - expected := swarmReferenceObject{ - Name: "testing_source", - File: swarmReferenceTarget{ - Name: config.Source, - UID: "0", - GID: "0", - Mode: os.FileMode(0444), - }, - } - assert.Check(t, is.DeepEqual(expected, swarmRef)) -} - -func TestServiceConvertsIsolation(t *testing.T) { - src := composetypes.ServiceConfig{ - Isolation: "hyperv", - } - result, err := Service("1.35", Namespace{name: "foo"}, src, nil, nil, nil, nil) - assert.NilError(t, err) - assert.Check(t, is.Equal(container.IsolationHyperV, result.TaskTemplate.ContainerSpec.Isolation)) -} - -func TestConvertServiceSecrets(t *testing.T) { - namespace := Namespace{name: "foo"} - secrets := []composetypes.ServiceSecretConfig{ - {Source: "foo_secret"}, - {Source: "bar_secret"}, - } - secretSpecs := map[string]composetypes.SecretConfig{ - "foo_secret": { - Name: "foo_secret", - }, - "bar_secret": { - Name: "bar_secret", - }, - } - client := &fakeClient{ - secretListFunc: func(opts types.SecretListOptions) ([]swarm.Secret, error) { - assert.Check(t, is.Contains(opts.Filters.Get("name"), "foo_secret")) - assert.Check(t, is.Contains(opts.Filters.Get("name"), "bar_secret")) - return []swarm.Secret{ - {Spec: 
swarm.SecretSpec{Annotations: swarm.Annotations{Name: "foo_secret"}}}, - {Spec: swarm.SecretSpec{Annotations: swarm.Annotations{Name: "bar_secret"}}}, - }, nil - }, - } - refs, err := convertServiceSecrets(client, namespace, secrets, secretSpecs) - assert.NilError(t, err) - expected := []*swarm.SecretReference{ - { - SecretName: "bar_secret", - File: &swarm.SecretReferenceFileTarget{ - Name: "bar_secret", - UID: "0", - GID: "0", - Mode: 0444, - }, - }, - { - SecretName: "foo_secret", - File: &swarm.SecretReferenceFileTarget{ - Name: "foo_secret", - UID: "0", - GID: "0", - Mode: 0444, - }, - }, - } - assert.DeepEqual(t, expected, refs) -} - -func TestConvertServiceConfigs(t *testing.T) { - namespace := Namespace{name: "foo"} - service := composetypes.ServiceConfig{ - Configs: []composetypes.ServiceConfigObjConfig{ - {Source: "foo_config"}, - {Source: "bar_config"}, - }, - CredentialSpec: composetypes.CredentialSpecConfig{ - Config: "baz_config", - }, - } - configSpecs := map[string]composetypes.ConfigObjConfig{ - "foo_config": { - Name: "foo_config", - }, - "bar_config": { - Name: "bar_config", - }, - "baz_config": { - Name: "baz_config", - }, - } - client := &fakeClient{ - configListFunc: func(opts types.ConfigListOptions) ([]swarm.Config, error) { - assert.Check(t, is.Contains(opts.Filters.Get("name"), "foo_config")) - assert.Check(t, is.Contains(opts.Filters.Get("name"), "bar_config")) - assert.Check(t, is.Contains(opts.Filters.Get("name"), "baz_config")) - return []swarm.Config{ - {Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "foo_config"}}}, - {Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "bar_config"}}}, - {Spec: swarm.ConfigSpec{Annotations: swarm.Annotations{Name: "baz_config"}}}, - }, nil - }, - } - refs, err := convertServiceConfigObjs(client, namespace, service, configSpecs) - assert.NilError(t, err) - expected := []*swarm.ConfigReference{ - { - ConfigName: "bar_config", - File: &swarm.ConfigReferenceFileTarget{ - Name: 
"bar_config", - UID: "0", - GID: "0", - Mode: 0444, - }, - }, - { - ConfigName: "baz_config", - Runtime: &swarm.ConfigReferenceRuntimeTarget{}, - }, - { - ConfigName: "foo_config", - File: &swarm.ConfigReferenceFileTarget{ - Name: "foo_config", - UID: "0", - GID: "0", - Mode: 0444, - }, - }, - } - assert.DeepEqual(t, expected, refs) -} - -type fakeClient struct { - client.Client - secretListFunc func(types.SecretListOptions) ([]swarm.Secret, error) - configListFunc func(types.ConfigListOptions) ([]swarm.Config, error) -} - -func (c *fakeClient) SecretList(ctx context.Context, options types.SecretListOptions) ([]swarm.Secret, error) { - if c.secretListFunc != nil { - return c.secretListFunc(options) - } - return []swarm.Secret{}, nil -} - -func (c *fakeClient) ConfigList(ctx context.Context, options types.ConfigListOptions) ([]swarm.Config, error) { - if c.configListFunc != nil { - return c.configListFunc(options) - } - return []swarm.Config{}, nil -} - -func TestConvertUpdateConfigParallelism(t *testing.T) { - parallel := uint64(4) - - // test default behavior - updateConfig := convertUpdateConfig(&composetypes.UpdateConfig{}) - assert.Check(t, is.Equal(uint64(1), updateConfig.Parallelism)) - - // Non default value - updateConfig = convertUpdateConfig(&composetypes.UpdateConfig{ - Parallelism: ¶llel, - }) - assert.Check(t, is.Equal(parallel, updateConfig.Parallelism)) -} - -func TestConvertServiceCapAddAndCapDrop(t *testing.T) { - tests := []struct { - title string - in, out composetypes.ServiceConfig - }{ - { - title: "default behavior", - }, - { - title: "some values", - in: composetypes.ServiceConfig{ - CapAdd: []string{"SYS_NICE", "CAP_NET_ADMIN"}, - CapDrop: []string{"CHOWN", "CAP_NET_ADMIN", "DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER"}, - }, - out: composetypes.ServiceConfig{ - CapAdd: []string{"CAP_NET_ADMIN", "CAP_SYS_NICE"}, - CapDrop: []string{"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID"}, - }, - }, - { - title: "adding ALL capabilities", - 
in: composetypes.ServiceConfig{ - CapAdd: []string{"ALL", "CAP_NET_ADMIN"}, - CapDrop: []string{"CHOWN", "CAP_NET_ADMIN", "DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER"}, - }, - out: composetypes.ServiceConfig{ - CapAdd: []string{"ALL"}, - CapDrop: []string{"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID", "CAP_NET_ADMIN"}, - }, - }, - { - title: "dropping ALL capabilities", - in: composetypes.ServiceConfig{ - CapAdd: []string{"CHOWN", "CAP_NET_ADMIN", "DAC_OVERRIDE", "CAP_FSETID", "CAP_FOWNER"}, - CapDrop: []string{"ALL", "CAP_NET_ADMIN", "CAP_FOO"}, - }, - out: composetypes.ServiceConfig{ - CapAdd: []string{"CAP_CHOWN", "CAP_DAC_OVERRIDE", "CAP_FOWNER", "CAP_FSETID", "CAP_NET_ADMIN"}, - CapDrop: []string{"ALL"}, - }, - }, - } - for _, tc := range tests { - tc := tc - t.Run(tc.title, func(t *testing.T) { - result, err := Service("1.41", Namespace{name: "foo"}, tc.in, nil, nil, nil, nil) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(result.TaskTemplate.ContainerSpec.CapabilityAdd, tc.out.CapAdd)) - assert.Check(t, is.DeepEqual(result.TaskTemplate.ContainerSpec.CapabilityDrop, tc.out.CapDrop)) - }) - } -} diff --git a/pkg/upstream/convert/volume.go b/pkg/upstream/convert/volume.go index 577f30de..93b148db 100644 --- a/pkg/upstream/convert/volume.go +++ b/pkg/upstream/convert/volume.go @@ -2,15 +2,15 @@ package convert // https://github.com/docker/cli/blob/master/cli/compose/convert import ( "coopcloud.tech/abra/pkg/i18n" - composetypes "github.com/docker/cli/cli/compose/types" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/docker/api/types/mount" "github.com/pkg/errors" ) -type volumes map[string]composetypes.VolumeConfig +type volumes map[string]composeGoTypes.VolumeConfig // Volumes from compose-file types to engine api types -func Volumes(serviceVolumes []composetypes.ServiceVolumeConfig, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) { +func Volumes(serviceVolumes 
[]composeGoTypes.ServiceVolumeConfig, stackVolumes volumes, namespace Namespace) ([]mount.Mount, error) { var mounts []mount.Mount for _, volumeConfig := range serviceVolumes { @@ -23,7 +23,7 @@ func Volumes(serviceVolumes []composetypes.ServiceVolumeConfig, stackVolumes vol return mounts, nil } -func createMountFromVolume(volume composetypes.ServiceVolumeConfig) mount.Mount { +func createMountFromVolume(volume composeGoTypes.ServiceVolumeConfig) mount.Mount { return mount.Mount{ Type: mount.Type(volume.Type), Target: volume.Target, @@ -34,7 +34,7 @@ func createMountFromVolume(volume composetypes.ServiceVolumeConfig) mount.Mount } func handleVolumeToMount( - volume composetypes.ServiceVolumeConfig, + volume composeGoTypes.ServiceVolumeConfig, stackVolumes volumes, namespace Namespace, ) (mount.Mount, error) { @@ -68,7 +68,7 @@ func handleVolumeToMount( } // External named volumes - if stackVolume.External.External { + if stackVolume.External { return result, nil } @@ -83,7 +83,7 @@ func handleVolumeToMount( return result, nil } -func handleBindToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) { +func handleBindToMount(volume composeGoTypes.ServiceVolumeConfig) (mount.Mount, error) { result := createMountFromVolume(volume) if volume.Source == "" { @@ -103,7 +103,7 @@ func handleBindToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, er return result, nil } -func handleTmpfsToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, error) { +func handleTmpfsToMount(volume composeGoTypes.ServiceVolumeConfig) (mount.Mount, error) { result := createMountFromVolume(volume) if volume.Source != "" { @@ -117,13 +117,13 @@ func handleTmpfsToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, e } if volume.Tmpfs != nil { result.TmpfsOptions = &mount.TmpfsOptions{ - SizeBytes: volume.Tmpfs.Size, + SizeBytes: int64(volume.Tmpfs.Size), } } return result, nil } -func handleNpipeToMount(volume composetypes.ServiceVolumeConfig) 
(mount.Mount, error) { +func handleNpipeToMount(volume composeGoTypes.ServiceVolumeConfig) (mount.Mount, error) { result := createMountFromVolume(volume) if volume.Source == "" { @@ -144,7 +144,7 @@ func handleNpipeToMount(volume composetypes.ServiceVolumeConfig) (mount.Mount, e } func convertVolumeToMount( - volume composetypes.ServiceVolumeConfig, + volume composeGoTypes.ServiceVolumeConfig, stackVolumes volumes, namespace Namespace, ) (mount.Mount, error) { diff --git a/pkg/upstream/convert/volume_test.go b/pkg/upstream/convert/volume_test.go deleted file mode 100644 index 6775a397..00000000 --- a/pkg/upstream/convert/volume_test.go +++ /dev/null @@ -1,361 +0,0 @@ -package convert // https://github.com/docker/cli/blob/master/cli/compose/convert/volume_test.go - -import ( - "testing" - - composetypes "github.com/docker/cli/cli/compose/types" - "github.com/docker/docker/api/types/mount" - "gotest.tools/v3/assert" - is "gotest.tools/v3/assert/cmp" -) - -func TestConvertVolumeToMountAnonymousVolume(t *testing.T) { - config := composetypes.ServiceVolumeConfig{ - Type: "volume", - Target: "/foo/bar", - } - expected := mount.Mount{ - Type: mount.TypeVolume, - Target: "/foo/bar", - } - mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, mount)) -} - -func TestConvertVolumeToMountAnonymousBind(t *testing.T) { - config := composetypes.ServiceVolumeConfig{ - Type: "bind", - Target: "/foo/bar", - Bind: &composetypes.ServiceVolumeBind{ - Propagation: "slave", - }, - } - _, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) - assert.Error(t, err, "invalid bind source, source cannot be empty") -} - -func TestConvertVolumeToMountUnapprovedType(t *testing.T) { - config := composetypes.ServiceVolumeConfig{ - Type: "foo", - Target: "/foo/bar", - } - _, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) - assert.Error(t, err, "volume type must be volume, bind, 
tmpfs or npipe") -} - -func TestConvertVolumeToMountConflictingOptionsBindInVolume(t *testing.T) { - namespace := NewNamespace("foo") - - config := composetypes.ServiceVolumeConfig{ - Type: "volume", - Source: "foo", - Target: "/target", - Bind: &composetypes.ServiceVolumeBind{ - Propagation: "slave", - }, - } - _, err := convertVolumeToMount(config, volumes{}, namespace) - assert.Error(t, err, "bind options are incompatible with type volume") -} - -func TestConvertVolumeToMountConflictingOptionsTmpfsInVolume(t *testing.T) { - namespace := NewNamespace("foo") - - config := composetypes.ServiceVolumeConfig{ - Type: "volume", - Source: "foo", - Target: "/target", - Tmpfs: &composetypes.ServiceVolumeTmpfs{ - Size: 1000, - }, - } - _, err := convertVolumeToMount(config, volumes{}, namespace) - assert.Error(t, err, "tmpfs options are incompatible with type volume") -} - -func TestConvertVolumeToMountConflictingOptionsVolumeInBind(t *testing.T) { - namespace := NewNamespace("foo") - - config := composetypes.ServiceVolumeConfig{ - Type: "bind", - Source: "/foo", - Target: "/target", - Volume: &composetypes.ServiceVolumeVolume{ - NoCopy: true, - }, - } - _, err := convertVolumeToMount(config, volumes{}, namespace) - assert.Error(t, err, "volume options are incompatible with type bind") -} - -func TestConvertVolumeToMountConflictingOptionsTmpfsInBind(t *testing.T) { - namespace := NewNamespace("foo") - - config := composetypes.ServiceVolumeConfig{ - Type: "bind", - Source: "/foo", - Target: "/target", - Tmpfs: &composetypes.ServiceVolumeTmpfs{ - Size: 1000, - }, - } - _, err := convertVolumeToMount(config, volumes{}, namespace) - assert.Error(t, err, "tmpfs options are incompatible with type bind") -} - -func TestConvertVolumeToMountConflictingOptionsBindInTmpfs(t *testing.T) { - namespace := NewNamespace("foo") - - config := composetypes.ServiceVolumeConfig{ - Type: "tmpfs", - Target: "/target", - Bind: &composetypes.ServiceVolumeBind{ - Propagation: "slave", - }, - } - _, 
err := convertVolumeToMount(config, volumes{}, namespace) - assert.Error(t, err, "bind options are incompatible with type tmpfs") -} - -func TestConvertVolumeToMountConflictingOptionsVolumeInTmpfs(t *testing.T) { - namespace := NewNamespace("foo") - - config := composetypes.ServiceVolumeConfig{ - Type: "tmpfs", - Target: "/target", - Volume: &composetypes.ServiceVolumeVolume{ - NoCopy: true, - }, - } - _, err := convertVolumeToMount(config, volumes{}, namespace) - assert.Error(t, err, "volume options are incompatible with type tmpfs") -} - -func TestConvertVolumeToMountNamedVolume(t *testing.T) { - stackVolumes := volumes{ - "normal": composetypes.VolumeConfig{ - Driver: "glusterfs", - DriverOpts: map[string]string{ - "opt": "value", - }, - Labels: map[string]string{ - "something": "labeled", - }, - }, - } - namespace := NewNamespace("foo") - expected := mount.Mount{ - Type: mount.TypeVolume, - Source: "foo_normal", - Target: "/foo", - ReadOnly: true, - VolumeOptions: &mount.VolumeOptions{ - Labels: map[string]string{ - LabelNamespace: "foo", - "something": "labeled", - }, - DriverConfig: &mount.Driver{ - Name: "glusterfs", - Options: map[string]string{ - "opt": "value", - }, - }, - NoCopy: true, - }, - } - config := composetypes.ServiceVolumeConfig{ - Type: "volume", - Source: "normal", - Target: "/foo", - ReadOnly: true, - Volume: &composetypes.ServiceVolumeVolume{ - NoCopy: true, - }, - } - mount, err := convertVolumeToMount(config, stackVolumes, namespace) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, mount)) -} - -func TestConvertVolumeToMountNamedVolumeWithNameCustomizd(t *testing.T) { - stackVolumes := volumes{ - "normal": composetypes.VolumeConfig{ - Name: "user_specified_name", - Driver: "vsphere", - DriverOpts: map[string]string{ - "opt": "value", - }, - Labels: map[string]string{ - "something": "labeled", - }, - }, - } - namespace := NewNamespace("foo") - expected := mount.Mount{ - Type: mount.TypeVolume, - Source: 
"user_specified_name", - Target: "/foo", - ReadOnly: true, - VolumeOptions: &mount.VolumeOptions{ - Labels: map[string]string{ - LabelNamespace: "foo", - "something": "labeled", - }, - DriverConfig: &mount.Driver{ - Name: "vsphere", - Options: map[string]string{ - "opt": "value", - }, - }, - NoCopy: true, - }, - } - config := composetypes.ServiceVolumeConfig{ - Type: "volume", - Source: "normal", - Target: "/foo", - ReadOnly: true, - Volume: &composetypes.ServiceVolumeVolume{ - NoCopy: true, - }, - } - mount, err := convertVolumeToMount(config, stackVolumes, namespace) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, mount)) -} - -func TestConvertVolumeToMountNamedVolumeExternal(t *testing.T) { - stackVolumes := volumes{ - "outside": composetypes.VolumeConfig{ - Name: "special", - External: composetypes.External{External: true}, - }, - } - namespace := NewNamespace("foo") - expected := mount.Mount{ - Type: mount.TypeVolume, - Source: "special", - Target: "/foo", - VolumeOptions: &mount.VolumeOptions{NoCopy: false}, - } - config := composetypes.ServiceVolumeConfig{ - Type: "volume", - Source: "outside", - Target: "/foo", - } - mount, err := convertVolumeToMount(config, stackVolumes, namespace) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, mount)) -} - -func TestConvertVolumeToMountNamedVolumeExternalNoCopy(t *testing.T) { - stackVolumes := volumes{ - "outside": composetypes.VolumeConfig{ - Name: "special", - External: composetypes.External{External: true}, - }, - } - namespace := NewNamespace("foo") - expected := mount.Mount{ - Type: mount.TypeVolume, - Source: "special", - Target: "/foo", - VolumeOptions: &mount.VolumeOptions{ - NoCopy: true, - }, - } - config := composetypes.ServiceVolumeConfig{ - Type: "volume", - Source: "outside", - Target: "/foo", - Volume: &composetypes.ServiceVolumeVolume{ - NoCopy: true, - }, - } - mount, err := convertVolumeToMount(config, stackVolumes, namespace) - assert.NilError(t, err) - 
assert.Check(t, is.DeepEqual(expected, mount)) -} - -func TestConvertVolumeToMountBind(t *testing.T) { - stackVolumes := volumes{} - namespace := NewNamespace("foo") - expected := mount.Mount{ - Type: mount.TypeBind, - Source: "/bar", - Target: "/foo", - ReadOnly: true, - BindOptions: &mount.BindOptions{Propagation: mount.PropagationShared}, - } - config := composetypes.ServiceVolumeConfig{ - Type: "bind", - Source: "/bar", - Target: "/foo", - ReadOnly: true, - Bind: &composetypes.ServiceVolumeBind{Propagation: "shared"}, - } - mount, err := convertVolumeToMount(config, stackVolumes, namespace) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, mount)) -} - -func TestConvertVolumeToMountVolumeDoesNotExist(t *testing.T) { - namespace := NewNamespace("foo") - config := composetypes.ServiceVolumeConfig{ - Type: "volume", - Source: "unknown", - Target: "/foo", - ReadOnly: true, - } - _, err := convertVolumeToMount(config, volumes{}, namespace) - assert.Error(t, err, "undefined volume \"unknown\"") -} - -func TestConvertTmpfsToMountVolume(t *testing.T) { - config := composetypes.ServiceVolumeConfig{ - Type: "tmpfs", - Target: "/foo/bar", - Tmpfs: &composetypes.ServiceVolumeTmpfs{ - Size: 1000, - }, - } - expected := mount.Mount{ - Type: mount.TypeTmpfs, - Target: "/foo/bar", - TmpfsOptions: &mount.TmpfsOptions{SizeBytes: 1000}, - } - mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, mount)) -} - -func TestConvertTmpfsToMountVolumeWithSource(t *testing.T) { - config := composetypes.ServiceVolumeConfig{ - Type: "tmpfs", - Source: "/bar", - Target: "/foo/bar", - Tmpfs: &composetypes.ServiceVolumeTmpfs{ - Size: 1000, - }, - } - - _, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) - assert.Error(t, err, "invalid tmpfs source, source must be empty") -} - -func TestConvertVolumeToMountAnonymousNpipe(t *testing.T) { - config := 
composetypes.ServiceVolumeConfig{ - Type: "npipe", - Source: `\\.\pipe\foo`, - Target: `\\.\pipe\foo`, - } - expected := mount.Mount{ - Type: mount.TypeNamedPipe, - Source: `\\.\pipe\foo`, - Target: `\\.\pipe\foo`, - } - mount, err := convertVolumeToMount(config, volumes{}, NewNamespace("foo")) - assert.NilError(t, err) - assert.Check(t, is.DeepEqual(expected, mount)) -} diff --git a/pkg/upstream/stack/loader.go b/pkg/upstream/stack/loader.go index a6259903..301ca556 100644 --- a/pkg/upstream/stack/loader.go +++ b/pkg/upstream/stack/loader.go @@ -1,6 +1,7 @@ package stack // https://github.com/docker/cli/blob/master/cli/command/stack/loader/loader.go import ( + "context" "fmt" "io/ioutil" "path/filepath" @@ -8,58 +9,58 @@ import ( "strings" "coopcloud.tech/abra/pkg/i18n" - "coopcloud.tech/abra/pkg/log" + composeGoCli "github.com/compose-spec/compose-go/v2/cli" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" "github.com/docker/cli/cli/compose/loader" "github.com/docker/cli/cli/compose/schema" composetypes "github.com/docker/cli/cli/compose/types" "github.com/pkg/errors" ) -// DontSkipValidation ensures validation is done for compose file loading -func DontSkipValidation(opts *loader.Options) { - opts.SkipValidation = false +type LoadConf struct { + ComposeFiles []string + AppEnv map[string]string } -// SkipInterpolation skip interpolating environment variables. -func SkipInterpolation(opts *loader.Options) { - opts.SkipInterpolation = true -} +func LoadCompose(conf LoadConf) (*composeGoTypes.Project, error) { + var project *composeGoTypes.Project -// LoadComposefile parse the composefile specified in the cli and returns its Config and version. 
-func LoadComposefile(opts Deploy, appEnv map[string]string, options ...func(*loader.Options)) (*composetypes.Config, error) { - configDetails, err := getConfigDetails(opts.Composefiles, appEnv) - if err != nil { - return nil, err + var projectOptions *composeGoCli.ProjectOptions + if len(conf.ComposeFiles) == 0 { + return project, errors.New(i18n.G("LoadCompose: provide compose files")) } - if options == nil { - options = []func(*loader.Options){DontSkipValidation} - } - - dicts := getDictsFrom(configDetails.ConfigFiles) - config, err := loader.Load(configDetails, options...) - if err != nil { - if fpe, ok := err.(*loader.ForbiddenPropertiesError); ok { - return nil, errors.New(i18n.G("compose file contains unsupported options: %s", propertyWarnings(fpe.Properties))) + if len(conf.AppEnv) == 0 { + var err error + projectOptions, err = composeGoCli.NewProjectOptions( + conf.ComposeFiles, + composeGoCli.WithInterpolation(false), + ) + if err != nil { + return project, err + } + } else { + var env []string + for k, v := range conf.AppEnv { + env = append(env, fmt.Sprintf("%s=%s", k, v)) + } + + var err error + projectOptions, err = composeGoCli.NewProjectOptions( + conf.ComposeFiles, + composeGoCli.WithEnv(env), + ) + if err != nil { + return project, err } - return nil, err } - recipeName, exists := appEnv["RECIPE"] - if !exists { - recipeName, _ = appEnv["TYPE"] + project, err := projectOptions.LoadProject(context.Background()) + if err != nil { + return project, err } - unsupportedProperties := loader.GetUnsupportedProperties(dicts...) - if len(unsupportedProperties) > 0 { - log.Warn(i18n.G("%s: ignoring unsupported options: %s", recipeName, strings.Join(unsupportedProperties, ", "))) - } - - deprecatedProperties := loader.GetDeprecatedProperties(dicts...) 
- if len(deprecatedProperties) > 0 { - log.Warn(i18n.G("%s: ignoring deprecated options: %s", recipeName, propertyWarnings(deprecatedProperties))) - } - return config, nil + return project, nil } func getDictsFrom(configFiles []composetypes.ConfigFile) []map[string]interface{} { diff --git a/pkg/upstream/stack/loader_test.go b/pkg/upstream/stack/loader_test.go new file mode 100644 index 00000000..138c3b72 --- /dev/null +++ b/pkg/upstream/stack/loader_test.go @@ -0,0 +1,26 @@ +package stack_test // https://github.com/docker/cli/blob/master/cli/command/stack/loader/loader.go + +import ( + "testing" + + "coopcloud.tech/abra/pkg/app" + "coopcloud.tech/abra/pkg/test" +) + +func TestSkipInterpolation(t *testing.T) { + test.Setup() + t.Cleanup(func() { test.Teardown() }) + + a, err := app.Get(test.AppName) + if err != nil { + t.Fatal(err) + } + + _, err = a.Recipe.GetComposeConfig() + if err != nil { + t.Fatal(err) + } + + // TODO: ensure compose has a port with no interpolated value + // TODO: ensure compose has port with interpolated value +} diff --git a/pkg/upstream/stack/stack.go b/pkg/upstream/stack/stack.go index 2e19c138..a4257f70 100644 --- a/pkg/upstream/stack/stack.go +++ b/pkg/upstream/stack/stack.go @@ -12,6 +12,7 @@ import ( stdlibErr "errors" tea "github.com/charmbracelet/bubbletea" + composeGoTypes "github.com/compose-spec/compose-go/v2/types" "coopcloud.tech/abra/pkg/config" "coopcloud.tech/abra/pkg/i18n" @@ -20,7 +21,6 @@ import ( "coopcloud.tech/abra/pkg/upstream/convert" "github.com/docker/cli/cli/command" "github.com/docker/cli/cli/command/stack/formatter" - composetypes "github.com/docker/cli/cli/compose/types" "github.com/docker/docker/api/types" "github.com/docker/docker/api/types/container" "github.com/docker/docker/api/types/filters" @@ -197,7 +197,7 @@ func pruneServices(ctx context.Context, cl *dockerClient.Client, namespace conve func RunDeploy( cl *dockerClient.Client, opts Deploy, - cfg *composetypes.Config, + cfg *composeGoTypes.Project, 
appName string, serverName string, dontWait bool, @@ -246,7 +246,7 @@ func deployCompose( ctx context.Context, cl *dockerClient.Client, opts Deploy, - config *composetypes.Config, + config *composeGoTypes.Project, appName string, serverName string, dontWait bool, @@ -325,7 +325,7 @@ func deployCompose( return nil } -func getServicesDeclaredNetworks(serviceConfigs []composetypes.ServiceConfig) map[string]struct{} { +func getServicesDeclaredNetworks(serviceConfigs map[string]composeGoTypes.ServiceConfig) map[string]struct{} { serviceNetworks := map[string]struct{}{} for _, serviceConfig := range serviceConfigs { if len(serviceConfig.Networks) == 0 { diff --git a/tests/resources/test_recipe/compose.interpolate.yml b/tests/resources/test_recipe/compose.interpolate.yml new file mode 100644 index 00000000..db8963f4 --- /dev/null +++ b/tests/resources/test_recipe/compose.interpolate.yml @@ -0,0 +1,7 @@ +--- + +services: + app: + ports: + - target: 22 + published: ${PORT} diff --git a/tests/resources/test_recipe/compose.yml b/tests/resources/test_recipe/compose.yml index 638fd142..08f56800 100644 --- a/tests/resources/test_recipe/compose.yml +++ b/tests/resources/test_recipe/compose.yml @@ -1,5 +1,4 @@ --- -version: "3.8" services: app: diff --git a/vendor/github.com/compose-spec/compose-go/v2/LICENSE b/vendor/github.com/compose-spec/compose-go/v2/LICENSE new file mode 100644 index 00000000..1d97deaf --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/LICENSE @@ -0,0 +1,191 @@ + + Apache License + Version 2.0, January 2004 + https://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + Copyright 2020 The Compose Specification Authors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + https://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/compose-spec/compose-go/v2/NOTICE b/vendor/github.com/compose-spec/compose-go/v2/NOTICE new file mode 100644 index 00000000..9c275547 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/NOTICE @@ -0,0 +1,2 @@ +The Compose Specification +Copyright 2020 The Compose Specification Authors diff --git a/vendor/github.com/compose-spec/compose-go/v2/cli/options.go b/vendor/github.com/compose-spec/compose-go/v2/cli/options.go new file mode 100644 index 00000000..69ea5654 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/cli/options.go @@ -0,0 +1,590 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package cli + +import ( + "context" + "io" + "os" + "path/filepath" + "strconv" + "strings" + + "github.com/sirupsen/logrus" + "go.yaml.in/yaml/v4" + + "github.com/compose-spec/compose-go/v2/consts" + "github.com/compose-spec/compose-go/v2/dotenv" + "github.com/compose-spec/compose-go/v2/loader" + "github.com/compose-spec/compose-go/v2/types" + "github.com/compose-spec/compose-go/v2/utils" +) + +// ProjectOptions provides common configuration for loading a project. +type ProjectOptions struct { + // Name is a valid Compose project name to be used or empty. + // + // If empty, the project loader will automatically infer a reasonable + // project name if possible. + Name string + + // WorkingDir is a file path to use as the project directory or empty. + // + // If empty, the project loader will automatically infer a reasonable + // working directory if possible. + WorkingDir string + + // ConfigPaths are file paths to one or more Compose files. + // + // These are applied in order by the loader following the override logic + // as described in the spec. + // + // The first entry is required and is the primary Compose file. + // For convenience, WithConfigFileEnv and WithDefaultConfigPath + // are provided to populate this in a predictable manner. + ConfigPaths []string + + // Environment are additional environment variables to make available + // for interpolation. + // + // NOTE: For security, the loader does not automatically expose any + // process environment variables. For convenience, WithOsEnv can be + // used if appropriate. + Environment types.Mapping + + // EnvFiles are file paths to ".env" files with additional environment + // variable data. + // + // These are loaded in-order, so it is possible to override variables or + // in subsequent files. + // + // This field is optional, but any file paths that are included here must + // exist or an error will be returned during load. 
+ EnvFiles []string + + loadOptions []func(*loader.Options) + + // Callbacks to retrieve metadata information during parse defined before + // creating the project + Listeners []loader.Listener + // ResourceLoaders manages support for remote resources + ResourceLoaders []loader.ResourceLoader +} + +type ProjectOptionsFn func(*ProjectOptions) error + +// NewProjectOptions creates ProjectOptions +func NewProjectOptions(configs []string, opts ...ProjectOptionsFn) (*ProjectOptions, error) { + options := &ProjectOptions{ + ConfigPaths: configs, + Environment: map[string]string{}, + Listeners: []loader.Listener{}, + } + for _, o := range opts { + err := o(options) + if err != nil { + return nil, err + } + } + return options, nil +} + +// WithName defines ProjectOptions' name +func WithName(name string) ProjectOptionsFn { + return func(o *ProjectOptions) error { + // a project (once loaded) cannot have an empty name + // however, on the options object, the name is optional: if unset, + // a name will be inferred by the loader, so it's legal to set the + // name to an empty string here + if name != loader.NormalizeProjectName(name) { + return loader.InvalidProjectNameErr(name) + } + o.Name = name + return nil + } +} + +// WithWorkingDirectory defines ProjectOptions' working directory +func WithWorkingDirectory(wd string) ProjectOptionsFn { + return func(o *ProjectOptions) error { + if wd == "" { + return nil + } + abs, err := filepath.Abs(wd) + if err != nil { + return err + } + o.WorkingDir = abs + return nil + } +} + +// WithConfigFileEnv allow to set compose config file paths by COMPOSE_FILE environment variable +func WithConfigFileEnv(o *ProjectOptions) error { + if len(o.ConfigPaths) > 0 { + return nil + } + sep := o.Environment[consts.ComposePathSeparator] + if sep == "" { + sep = string(os.PathListSeparator) + } + f, ok := o.Environment[consts.ComposeFilePath] + if ok { + paths, err := absolutePaths(strings.Split(f, sep)) + o.ConfigPaths = paths + return err + } + 
return nil +} + +// WithDefaultConfigPath searches for default config files from working directory +func WithDefaultConfigPath(o *ProjectOptions) error { + if len(o.ConfigPaths) > 0 { + return nil + } + pwd, err := o.GetWorkingDir() + if err != nil { + return err + } + for { + candidates := findFiles(DefaultFileNames, pwd) + if len(candidates) > 0 { + winner := candidates[0] + if len(candidates) > 1 { + logrus.Warnf("Found multiple config files with supported names: %s", strings.Join(candidates, ", ")) + logrus.Warnf("Using %s", winner) + } + o.ConfigPaths = append(o.ConfigPaths, winner) + + overrides := findFiles(DefaultOverrideFileNames, pwd) + if len(overrides) > 0 { + if len(overrides) > 1 { + logrus.Warnf("Found multiple override files with supported names: %s", strings.Join(overrides, ", ")) + logrus.Warnf("Using %s", overrides[0]) + } + o.ConfigPaths = append(o.ConfigPaths, overrides[0]) + } + return nil + } + parent := filepath.Dir(pwd) + if parent == pwd { + // no config file found, but that's not a blocker if caller only needs project name + return nil + } + pwd = parent + } +} + +// WithEnv defines a key=value set of variables used for compose file interpolation +func WithEnv(env []string) ProjectOptionsFn { + return func(o *ProjectOptions) error { + for k, v := range utils.GetAsEqualsMap(env) { + o.Environment[k] = v + } + return nil + } +} + +// WithDiscardEnvFile sets discards the `env_file` section after resolving to +// the `environment` section +func WithDiscardEnvFile(o *ProjectOptions) error { + o.loadOptions = append(o.loadOptions, loader.WithDiscardEnvFiles) + return nil +} + +// WithLoadOptions provides a hook to control how compose files are loaded +func WithLoadOptions(loadOptions ...func(*loader.Options)) ProjectOptionsFn { + return func(o *ProjectOptions) error { + o.loadOptions = append(o.loadOptions, loadOptions...) 
+ return nil + } +} + +// WithDefaultProfiles uses the provided profiles (if any), and falls back to +// profiles specified via the COMPOSE_PROFILES environment variable otherwise. +func WithDefaultProfiles(profiles ...string) ProjectOptionsFn { + return func(o *ProjectOptions) error { + if len(profiles) == 0 { + for _, s := range strings.Split(o.Environment[consts.ComposeProfiles], ",") { + profiles = append(profiles, strings.TrimSpace(s)) + } + } + o.loadOptions = append(o.loadOptions, loader.WithProfiles(profiles)) + return nil + } +} + +// WithProfiles sets profiles to be activated +func WithProfiles(profiles []string) ProjectOptionsFn { + return func(o *ProjectOptions) error { + o.loadOptions = append(o.loadOptions, loader.WithProfiles(profiles)) + return nil + } +} + +// WithOsEnv imports environment variables from OS +func WithOsEnv(o *ProjectOptions) error { + for k, v := range utils.GetAsEqualsMap(os.Environ()) { + if _, set := o.Environment[k]; set { + continue + } + o.Environment[k] = v + } + return nil +} + +// WithEnvFile sets an alternate env file. +// +// Deprecated: use WithEnvFiles instead. +func WithEnvFile(file string) ProjectOptionsFn { + var files []string + if file != "" { + files = []string{file} + } + return WithEnvFiles(files...) +} + +// WithEnvFiles set env file(s) to be loaded to set project environment. 
+// defaults to local .env file if no explicit file is selected, until COMPOSE_DISABLE_ENV_FILE is set +func WithEnvFiles(file ...string) ProjectOptionsFn { + return func(o *ProjectOptions) error { + if len(file) > 0 { + o.EnvFiles = file + return nil + } + if v, ok := os.LookupEnv(consts.ComposeDisableDefaultEnvFile); ok { + b, err := strconv.ParseBool(v) + if err != nil { + return err + } + if b { + return nil + } + } + + wd, err := o.GetWorkingDir() + if err != nil { + return err + } + defaultDotEnv := filepath.Join(wd, ".env") + + s, err := os.Stat(defaultDotEnv) + if os.IsNotExist(err) { + return nil + } + if err != nil { + return err + } + if !s.IsDir() { + o.EnvFiles = []string{defaultDotEnv} + } + return nil + } +} + +// WithDotEnv imports environment variables from .env file +func WithDotEnv(o *ProjectOptions) error { + envMap, err := dotenv.GetEnvFromFile(o.Environment, o.EnvFiles) + if err != nil { + return err + } + o.Environment.Merge(envMap) + return nil +} + +// WithInterpolation set ProjectOptions to enable/skip interpolation +func WithInterpolation(interpolation bool) ProjectOptionsFn { + return func(o *ProjectOptions) error { + o.loadOptions = append(o.loadOptions, func(options *loader.Options) { + options.SkipInterpolation = !interpolation + }) + return nil + } +} + +// WithNormalization set ProjectOptions to enable/skip normalization +func WithNormalization(normalization bool) ProjectOptionsFn { + return func(o *ProjectOptions) error { + o.loadOptions = append(o.loadOptions, func(options *loader.Options) { + options.SkipNormalization = !normalization + }) + return nil + } +} + +// WithConsistency set ProjectOptions to enable/skip consistency +func WithConsistency(consistency bool) ProjectOptionsFn { + return func(o *ProjectOptions) error { + o.loadOptions = append(o.loadOptions, func(options *loader.Options) { + options.SkipConsistencyCheck = !consistency + }) + return nil + } +} + +// WithResolvedPaths set ProjectOptions to enable paths 
resolution +func WithResolvedPaths(resolve bool) ProjectOptionsFn { + return func(o *ProjectOptions) error { + o.loadOptions = append(o.loadOptions, func(options *loader.Options) { + options.ResolvePaths = resolve + }) + return nil + } +} + +// WithResourceLoader register support for ResourceLoader to manage remote resources +func WithResourceLoader(r loader.ResourceLoader) ProjectOptionsFn { + return func(o *ProjectOptions) error { + o.ResourceLoaders = append(o.ResourceLoaders, r) + o.loadOptions = append(o.loadOptions, func(options *loader.Options) { + options.ResourceLoaders = o.ResourceLoaders + }) + return nil + } +} + +// WithExtension register a know extension `x-*` with the go struct type to decode into +func WithExtension(name string, typ any) ProjectOptionsFn { + return func(o *ProjectOptions) error { + o.loadOptions = append(o.loadOptions, func(options *loader.Options) { + if options.KnownExtensions == nil { + options.KnownExtensions = map[string]any{} + } + options.KnownExtensions[name] = typ + }) + return nil + } +} + +// Append listener to event +func (o *ProjectOptions) WithListeners(listeners ...loader.Listener) { + o.Listeners = append(o.Listeners, listeners...) 
+} + +// WithoutEnvironmentResolution disable environment resolution +func WithoutEnvironmentResolution(o *ProjectOptions) error { + o.loadOptions = append(o.loadOptions, func(options *loader.Options) { + options.SkipResolveEnvironment = true + }) + return nil +} + +// DefaultFileNames defines the Compose file names for auto-discovery (in order of preference) +var DefaultFileNames = []string{"compose.yaml", "compose.yml", "docker-compose.yml", "docker-compose.yaml"} + +// DefaultOverrideFileNames defines the Compose override file names for auto-discovery (in order of preference) +var DefaultOverrideFileNames = []string{"compose.override.yml", "compose.override.yaml", "docker-compose.override.yml", "docker-compose.override.yaml"} + +func (o *ProjectOptions) GetWorkingDir() (string, error) { + if o.WorkingDir != "" { + return filepath.Abs(o.WorkingDir) + } +PATH: + for _, path := range o.ConfigPaths { + if path != "-" { + for _, l := range o.ResourceLoaders { + if l.Accept(path) { + break PATH + } + } + absPath, err := filepath.Abs(path) + if err != nil { + return "", err + } + return filepath.Dir(absPath), nil + } + } + return os.Getwd() +} + +// ReadConfigFiles reads ConfigFiles and populates the content field +func (o *ProjectOptions) ReadConfigFiles(ctx context.Context, workingDir string, options *ProjectOptions) (*types.ConfigDetails, error) { + config, err := loader.LoadConfigFiles(ctx, options.ConfigPaths, workingDir, options.loadOptions...) 
+ if err != nil { + return nil, err + } + configs := make([][]byte, len(config.ConfigFiles)) + + for i, c := range config.ConfigFiles { + var err error + var b []byte + if c.IsStdin() { + b, err = io.ReadAll(os.Stdin) + if err != nil { + return nil, err + } + } else { + f, err := filepath.Abs(c.Filename) + if err != nil { + return nil, err + } + b, err = os.ReadFile(f) + if err != nil { + return nil, err + } + } + configs[i] = b + } + for i, c := range configs { + config.ConfigFiles[i].Content = c + } + return config, nil +} + +// LoadProject loads compose file according to options and bind to types.Project go structs +func (o *ProjectOptions) LoadProject(ctx context.Context) (*types.Project, error) { + config, err := o.prepare(ctx) + if err != nil { + return nil, err + } + + project, err := loader.LoadWithContext(ctx, types.ConfigDetails{ + ConfigFiles: config.ConfigFiles, + WorkingDir: config.WorkingDir, + Environment: o.Environment, + }, o.loadOptions...) + if err != nil { + return nil, err + } + + for _, config := range config.ConfigFiles { + project.ComposeFiles = append(project.ComposeFiles, config.Filename) + } + + return project, nil +} + +// LoadModel loads compose file according to options and returns a raw (yaml tree) model +func (o *ProjectOptions) LoadModel(ctx context.Context) (map[string]any, error) { + configDetails, err := o.prepare(ctx) + if err != nil { + return nil, err + } + + return loader.LoadModelWithContext(ctx, *configDetails, o.loadOptions...) 
+} + +// prepare converts ProjectOptions into loader's types.ConfigDetails and configures default load options +func (o *ProjectOptions) prepare(ctx context.Context) (*types.ConfigDetails, error) { + defaultDir, err := o.GetWorkingDir() + if err != nil { + return &types.ConfigDetails{}, err + } + + configDetails, err := o.ReadConfigFiles(ctx, defaultDir, o) + if err != nil { + return configDetails, err + } + + isNamed := false + if o.Name == "" { + type named struct { + Name string `yaml:"name,omitempty"` + } + // if any of the compose file is named, this is equivalent to user passing --project-name + for _, cfg := range configDetails.ConfigFiles { + var n named + err = yaml.Unmarshal(cfg.Content, &n) + if err != nil { + return nil, err + } + if n.Name != "" { + isNamed = true + break + } + } + } + + o.loadOptions = append(o.loadOptions, + withNamePrecedenceLoad(defaultDir, isNamed, o), + withConvertWindowsPaths(o), + withListeners(o)) + + return configDetails, nil +} + +// ProjectFromOptions load a compose project based on command line options +// Deprecated: use ProjectOptions.LoadProject or ProjectOptions.LoadModel +func ProjectFromOptions(ctx context.Context, options *ProjectOptions) (*types.Project, error) { + return options.LoadProject(ctx) +} + +func withNamePrecedenceLoad(absWorkingDir string, namedInYaml bool, options *ProjectOptions) func(*loader.Options) { + return func(opts *loader.Options) { + if options.Name != "" { + opts.SetProjectName(options.Name, true) + } else if nameFromEnv, ok := options.Environment[consts.ComposeProjectName]; ok && nameFromEnv != "" { + opts.SetProjectName(nameFromEnv, true) + } else if !namedInYaml { + dirname := filepath.Base(absWorkingDir) + symlink, err := filepath.EvalSymlinks(absWorkingDir) + if err == nil && filepath.Base(symlink) != dirname { + logrus.Warnf("project has been loaded without an explicit name from a symlink. 
Using name %q", dirname) + } + opts.SetProjectName( + loader.NormalizeProjectName(dirname), + false, + ) + } + } +} + +func withConvertWindowsPaths(options *ProjectOptions) func(*loader.Options) { + return func(o *loader.Options) { + if o.ResolvePaths { + o.ConvertWindowsPaths = utils.StringToBool(options.Environment["COMPOSE_CONVERT_WINDOWS_PATHS"]) + } + } +} + +// save listeners from ProjectOptions (compose) to loader.Options +func withListeners(options *ProjectOptions) func(*loader.Options) { + return func(opts *loader.Options) { + opts.Listeners = append(opts.Listeners, options.Listeners...) + } +} + +func findFiles(names []string, pwd string) []string { + candidates := []string{} + for _, n := range names { + f := filepath.Join(pwd, n) + if _, err := os.Stat(f); err == nil { + candidates = append(candidates, f) + } + } + return candidates +} + +func absolutePaths(p []string) ([]string, error) { + var paths []string + for _, f := range p { + if f == "-" { + paths = append(paths, f) + continue + } + abs, err := filepath.Abs(f) + if err != nil { + return nil, err + } + f = abs + if _, err := os.Stat(f); err != nil { + return nil, err + } + paths = append(paths, f) + } + return paths, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go b/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go new file mode 100644 index 00000000..592e6f06 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/consts/consts.go @@ -0,0 +1,29 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package consts + +const ( + ComposeProjectName = "COMPOSE_PROJECT_NAME" + ComposePathSeparator = "COMPOSE_PATH_SEPARATOR" + ComposeFilePath = "COMPOSE_FILE" + ComposeDisableDefaultEnvFile = "COMPOSE_DISABLE_ENV_FILE" + ComposeProfiles = "COMPOSE_PROFILES" +) + +const Extensions = "#extensions" // Using # prefix, we prevent risk to conflict with an actual yaml key + +type ComposeFileKey struct{} diff --git a/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE b/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE new file mode 100644 index 00000000..9390caf6 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/dotenv/LICENSE @@ -0,0 +1,22 @@ +Copyright (c) 2013 John Barton + +MIT License + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go b/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go new file mode 100644 index 00000000..fe8ce852 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/dotenv/env.go @@ -0,0 +1,73 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package dotenv + +import ( + "bytes" + "fmt" + "os" + "path/filepath" +) + +func GetEnvFromFile(currentEnv map[string]string, filenames []string) (map[string]string, error) { + envMap := make(map[string]string) + + for _, dotEnvFile := range filenames { + abs, err := filepath.Abs(dotEnvFile) + if err != nil { + return envMap, err + } + dotEnvFile = abs + + s, err := os.Stat(dotEnvFile) + if os.IsNotExist(err) { + return envMap, fmt.Errorf("couldn't find env file: %s", dotEnvFile) + } + if err != nil { + return envMap, err + } + + if s.IsDir() { + if len(filenames) == 0 { + return envMap, nil + } + return envMap, fmt.Errorf("%s is a directory", dotEnvFile) + } + + b, err := os.ReadFile(dotEnvFile) + if os.IsNotExist(err) { + return nil, fmt.Errorf("couldn't read env file: %s", dotEnvFile) + } + if err != nil { + return envMap, err + } + + err = parseWithLookup(bytes.NewReader(b), envMap, func(k string) (string, bool) { + v, ok := currentEnv[k] + if ok { + return v, true + } + v, ok = envMap[k] + return v, ok + }) + if err != nil { + return envMap, fmt.Errorf("failed to read %s: %w", dotEnvFile, err) + } 
+ } + + return envMap, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/dotenv/format.go b/vendor/github.com/compose-spec/compose-go/v2/dotenv/format.go new file mode 100644 index 00000000..0234716c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/dotenv/format.go @@ -0,0 +1,51 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package dotenv + +import ( + "fmt" + "io" +) + +const DotEnv = ".env" + +var formats = map[string]Parser{ + DotEnv: func(r io.Reader, filename string, vars map[string]string, lookup func(key string) (string, bool)) error { + err := parseWithLookup(r, vars, lookup) + if err != nil { + return fmt.Errorf("failed to read %s: %w", filename, err) + } + return nil + }, +} + +type Parser func(r io.Reader, filename string, vars map[string]string, lookup func(key string) (string, bool)) error + +func RegisterFormat(format string, p Parser) { + formats[format] = p +} + +func ParseWithFormat(r io.Reader, filename string, vars map[string]string, resolve LookupFn, format string) error { + if format == "" { + format = DotEnv + } + fn, ok := formats[format] + if !ok { + return fmt.Errorf("unsupported env_file format %q", format) + } + return fn(r, filename, vars, resolve) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go b/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go new file mode 100644 index 00000000..8f4fdf6a --- /dev/null +++ 
b/vendor/github.com/compose-spec/compose-go/v2/dotenv/godotenv.go @@ -0,0 +1,182 @@ +// Package dotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv) +// +// Examples/readme can be found on the github page at https://github.com/joho/godotenv +// +// The TL;DR is that you make a .env file that looks something like +// +// SOME_ENV_VAR=somevalue +// +// and then in your go code you can call +// +// godotenv.Load() +// +// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR") +package dotenv + +import ( + "bytes" + "io" + "os" + "regexp" + "strings" + + "github.com/compose-spec/compose-go/v2/template" +) + +var utf8BOM = []byte("\uFEFF") + +var startsWithDigitRegex = regexp.MustCompile(`^\s*\d.*`) // Keys starting with numbers are ignored + +// LookupFn represents a lookup function to resolve variables from +type LookupFn func(string) (string, bool) + +var noLookupFn = func(_ string) (string, bool) { + return "", false +} + +// Parse reads an env file from io.Reader, returning a map of keys and values. +func Parse(r io.Reader) (map[string]string, error) { + return ParseWithLookup(r, nil) +} + +// ParseWithLookup reads an env file from io.Reader, returning a map of keys and values. +func ParseWithLookup(r io.Reader, lookupFn LookupFn) (map[string]string, error) { + vars := map[string]string{} + err := parseWithLookup(r, vars, lookupFn) + return vars, err +} + +// ParseWithLookup reads an env file from io.Reader, returning a map of keys and values. 
+func parseWithLookup(r io.Reader, vars map[string]string, lookupFn LookupFn) error { + data, err := io.ReadAll(r) + if err != nil { + return err + } + + // seek past the UTF-8 BOM if it exists (particularly on Windows, some + // editors tend to add it, and it'll cause parsing to fail) + data = bytes.TrimPrefix(data, utf8BOM) + + return newParser().parse(string(data), vars, lookupFn) +} + +// Load will read your env file(s) and load them into ENV for this process. +// +// Call this function as close as possible to the start of your program (ideally in main). +// +// If you call Load without any args it will default to loading .env in the current path. +// +// You can otherwise tell it which files to load (there can be more than one) like: +// +// godotenv.Load("fileone", "filetwo") +// +// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults +func Load(filenames ...string) error { + return load(false, filenames...) 
+} + +func load(overload bool, filenames ...string) error { + filenames = filenamesOrDefault(filenames) + for _, filename := range filenames { + err := loadFile(filename, overload) + if err != nil { + return err + } + } + return nil +} + +// ReadWithLookup gets all env vars from the files and/or lookup function and return values as +// a map rather than automatically writing values into env +func ReadWithLookup(lookupFn LookupFn, filenames ...string) (map[string]string, error) { + filenames = filenamesOrDefault(filenames) + envMap := make(map[string]string) + + for _, filename := range filenames { + individualEnvMap, individualErr := ReadFile(filename, lookupFn) + + if individualErr != nil { + return envMap, individualErr + } + + for key, value := range individualEnvMap { + if startsWithDigitRegex.MatchString(key) { + continue + } + envMap[key] = value + } + } + + return envMap, nil +} + +// Read all env (with same file loading semantics as Load) but return values as +// a map rather than automatically writing values into env +func Read(filenames ...string) (map[string]string, error) { + return ReadWithLookup(nil, filenames...) +} + +// UnmarshalBytesWithLookup parses env file from byte slice of chars, returning a map of keys and values. +func UnmarshalBytesWithLookup(src []byte, lookupFn LookupFn) (map[string]string, error) { + return UnmarshalWithLookup(string(src), lookupFn) +} + +// UnmarshalWithLookup parses env file from string, returning a map of keys and values. 
+func UnmarshalWithLookup(src string, lookupFn LookupFn) (map[string]string, error) { + out := make(map[string]string) + err := newParser().parse(src, out, lookupFn) + return out, err +} + +func filenamesOrDefault(filenames []string) []string { + if len(filenames) == 0 { + return []string{".env"} + } + return filenames +} + +func loadFile(filename string, overload bool) error { + envMap, err := ReadFile(filename, nil) + if err != nil { + return err + } + + currentEnv := map[string]bool{} + rawEnv := os.Environ() + for _, rawEnvLine := range rawEnv { + key := strings.Split(rawEnvLine, "=")[0] + currentEnv[key] = true + } + + for key, value := range envMap { + if !currentEnv[key] || overload { + _ = os.Setenv(key, value) + } + } + + return nil +} + +func ReadFile(filename string, lookupFn LookupFn) (map[string]string, error) { + file, err := os.Open(filename) + if err != nil { + return nil, err + } + defer file.Close() + + return ParseWithLookup(file, lookupFn) +} + +func expandVariables(value string, envMap map[string]string, lookupFn LookupFn) (string, error) { + retVal, err := template.Substitute(value, func(k string) (string, bool) { + if v, ok := lookupFn(k); ok { + return v, true + } + v, ok := envMap[k] + return v, ok + }) + if err != nil { + return value, err + } + return retVal, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go b/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go new file mode 100644 index 00000000..2db7b907 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/dotenv/parser.go @@ -0,0 +1,286 @@ +package dotenv + +import ( + "errors" + "fmt" + "regexp" + "strconv" + "strings" + "unicode" +) + +const ( + charComment = '#' + prefixSingleQuote = '\'' + prefixDoubleQuote = '"' +) + +var ( + escapeSeqRegex = regexp.MustCompile(`(\\(?:[abcfnrtv$"\\]|0\d{0,3}))`) + exportRegex = regexp.MustCompile(`^export\s+`) +) + +type parser struct { + line int +} + +func newParser() *parser { + return 
&parser{ + line: 1, + } +} + +func (p *parser) parse(src string, out map[string]string, lookupFn LookupFn) error { + cutset := src + if lookupFn == nil { + lookupFn = noLookupFn + } + for { + cutset = p.getStatementStart(cutset) + if cutset == "" { + // reached end of file + break + } + + key, left, inherited, err := p.locateKeyName(cutset) + if err != nil { + return err + } + if strings.Contains(key, " ") { + return fmt.Errorf("line %d: key cannot contain a space", p.line) + } + + if inherited { + value, ok := lookupFn(key) + if ok { + out[key] = value + } + cutset = left + continue + } + + value, left, err := p.extractVarValue(left, out, lookupFn) + if err != nil { + return err + } + + out[key] = value + cutset = left + } + + return nil +} + +// getStatementPosition returns position of statement begin. +// +// It skips any comment line or non-whitespace character. +func (p *parser) getStatementStart(src string) string { + pos := p.indexOfNonSpaceChar(src) + if pos == -1 { + return "" + } + + src = src[pos:] + if src[0] != charComment { + return src + } + + // skip comment section + pos = strings.IndexFunc(src, isCharFunc('\n')) + if pos == -1 { + return "" + } + return p.getStatementStart(src[pos:]) +} + +// locateKeyName locates and parses key name and returns rest of slice +func (p *parser) locateKeyName(src string) (string, string, bool, error) { + var key string + var inherited bool + // trim "export" and space at beginning + if exportRegex.MatchString(src) { + // we use a `strings.trim` to preserve the pointer to the same underlying memory. + // a regexp replace would copy the string. 
+ src = strings.TrimLeftFunc(strings.TrimPrefix(src, "export"), isSpace) + } + + // locate key name end and validate it in single loop + offset := 0 +loop: + for i, rune := range src { + if isSpace(rune) { + continue + } + + switch rune { + case '=', ':', '\n': + // library also supports yaml-style value declaration + key = src[0:i] + offset = i + 1 + inherited = rune == '\n' + break loop + case '_', '.', '-', '[', ']': + default: + // variable name should match [A-Za-z0-9_.-] + if unicode.IsLetter(rune) || unicode.IsNumber(rune) { + continue + } + + return "", "", inherited, fmt.Errorf( + `line %d: unexpected character %q in variable name %q`, + p.line, string(rune), strings.Split(src, "\n")[0]) + } + } + + if src == "" { + return "", "", inherited, errors.New("zero length string") + } + + if inherited && strings.IndexByte(key, ' ') == -1 { + p.line++ + } + + // trim whitespace + key = strings.TrimRightFunc(key, unicode.IsSpace) + cutset := strings.TrimLeftFunc(src[offset:], isSpace) + return key, cutset, inherited, nil +} + +// extractVarValue extracts variable value and returns rest of slice +func (p *parser) extractVarValue(src string, envMap map[string]string, lookupFn LookupFn) (string, string, error) { + quote, isQuoted := hasQuotePrefix(src) + if !isQuoted { + // unquoted value - read until new line + value, rest, _ := strings.Cut(src, "\n") + p.line++ + + // Remove inline comments on unquoted lines + value, _, _ = strings.Cut(value, " #") + value = strings.TrimRightFunc(value, unicode.IsSpace) + retVal, err := expandVariables(value, envMap, lookupFn) + return retVal, rest, err + } + + previousCharIsEscape := false + // lookup quoted string terminator + var chars []byte + for i := 1; i < len(src); i++ { + char := src[i] + if char == '\n' { + p.line++ + } + if char != quote { + if !previousCharIsEscape && char == '\\' { + previousCharIsEscape = true + continue + } + if previousCharIsEscape { + previousCharIsEscape = false + chars = append(chars, '\\') + } + 
chars = append(chars, char) + continue + } + + // skip escaped quote symbol (\" or \', depends on quote) + if previousCharIsEscape { + previousCharIsEscape = false + chars = append(chars, char) + continue + } + + // trim quotes + value := string(chars) + if quote == prefixDoubleQuote { + // expand standard shell escape sequences & then interpolate + // variables on the result + retVal, err := expandVariables(expandEscapes(value), envMap, lookupFn) + if err != nil { + return "", "", err + } + value = retVal + } + + return value, src[i+1:], nil + } + + // return formatted error if quoted string is not terminated + valEndIndex := strings.IndexFunc(src, isCharFunc('\n')) + if valEndIndex == -1 { + valEndIndex = len(src) + } + + return "", "", fmt.Errorf("line %d: unterminated quoted value %s", p.line, src[:valEndIndex]) +} + +func expandEscapes(str string) string { + out := escapeSeqRegex.ReplaceAllStringFunc(str, func(match string) string { + if match == `\$` { + // `\$` is not a Go escape sequence, the expansion parser uses + // the special `$$` syntax + // both `FOO=\$bar` and `FOO=$$bar` are valid in an env file and + // will result in FOO w/ literal value of "$bar" (no interpolation) + return "$$" + } + + if strings.HasPrefix(match, `\0`) { + // octal escape sequences in Go are not prefixed with `\0`, so + // rewrite the prefix, e.g. `\0123` -> `\123` -> literal value "S" + match = strings.Replace(match, `\0`, `\`, 1) + } + + // use Go to unquote (unescape) the literal + // see https://go.dev/ref/spec#Rune_literals + // + // NOTE: Go supports ADDITIONAL escapes like `\x` & `\u` & `\U`! 
+ // These are NOT supported, which is why we use a regex to find + // only matches we support and then use `UnquoteChar` instead of a + // `Unquote` on the entire value + v, _, _, err := strconv.UnquoteChar(match, '"') + if err != nil { + return match + } + return string(v) + }) + return out +} + +func (p *parser) indexOfNonSpaceChar(src string) int { + return strings.IndexFunc(src, func(r rune) bool { + if r == '\n' { + p.line++ + } + return !unicode.IsSpace(r) + }) +} + +// hasQuotePrefix reports whether charset starts with single or double quote and returns quote character +func hasQuotePrefix(src string) (byte, bool) { + if src == "" { + return 0, false + } + + switch quote := src[0]; quote { + case prefixDoubleQuote, prefixSingleQuote: + return quote, true // isQuoted + default: + return 0, false + } +} + +func isCharFunc(char rune) func(rune) bool { + return func(v rune) bool { + return v == char + } +} + +// isSpace reports whether the rune is a space character but not line break character +// +// this differs from unicode.IsSpace, which also applies line break as space +func isSpace(r rune) bool { + switch r { + case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0: + return true + } + return false +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go b/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go new file mode 100644 index 00000000..1990ddd2 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/errdefs/errors.go @@ -0,0 +1,56 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package errdefs
+
+import "errors"
+
+var (
+ // ErrNotFound is returned when an object is not found
+ ErrNotFound = errors.New("not found")
+
+ // ErrInvalid is returned when a compose project is invalid
+ ErrInvalid = errors.New("invalid compose project")
+
+ // ErrUnsupported is returned when a compose project uses an unsupported attribute
+ ErrUnsupported = errors.New("unsupported attribute")
+
+ // ErrIncompatible is returned when a compose project uses an incompatible attribute
+ ErrIncompatible = errors.New("incompatible attribute")
+
+ // ErrDisabled is returned when a resource was found in model but is disabled
+ ErrDisabled = errors.New("disabled")
+)
+
+// IsNotFoundError returns true if the unwrapped error is ErrNotFound
+func IsNotFoundError(err error) bool {
+ return errors.Is(err, ErrNotFound)
+}
+
+// IsInvalidError returns true if the unwrapped error is ErrInvalid
+func IsInvalidError(err error) bool {
+ return errors.Is(err, ErrInvalid)
+}
+
+// IsUnsupportedError returns true if the unwrapped error is ErrUnsupported
+func IsUnsupportedError(err error) bool {
+ return errors.Is(err, ErrUnsupported)
+}
+
+// IsIncompatibleError returns true if the unwrapped error is ErrIncompatible
+func IsIncompatibleError(err error) bool {
+ return errors.Is(err, ErrIncompatible)
+} diff --git a/vendor/github.com/compose-spec/compose-go/v2/format/volume.go b/vendor/github.com/compose-spec/compose-go/v2/format/volume.go new file mode 100644 index 00000000..b696af32 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/format/volume.go @@ -0,0
+1,199 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package format + +import ( + "errors" + "fmt" + "strings" + "unicode" + "unicode/utf8" + + "github.com/compose-spec/compose-go/v2/types" +) + +const endOfSpec = rune(0) + +// ParseVolume parses a volume spec without any knowledge of the target platform +func ParseVolume(spec string) (types.ServiceVolumeConfig, error) { + volume := types.ServiceVolumeConfig{} + + switch len(spec) { + case 0: + return volume, errors.New("invalid empty volume spec") + case 1, 2: + volume.Target = spec + volume.Type = types.VolumeTypeVolume + return volume, nil + } + + var buffer []rune + var inVarSubstitution int // Track nesting depth of ${...} + for i, char := range spec + string(endOfSpec) { + // Check if we're entering a variable substitution + if char == '$' && i+1 < len(spec) && rune(spec[i+1]) == '{' { + inVarSubstitution++ + buffer = append(buffer, char) + continue + } + + // Check if we're exiting a variable substitution + if char == '}' && inVarSubstitution > 0 { + inVarSubstitution-- + buffer = append(buffer, char) + continue + } + + switch { + case isWindowsDrive(buffer, char): + buffer = append(buffer, char) + case (char == ':' || char == endOfSpec) && inVarSubstitution == 0: + if err := populateFieldFromBuffer(char, buffer, &volume); err != nil { + populateType(&volume) + return volume, fmt.Errorf("invalid spec: %s: %w", spec, err) + } + buffer = nil + default: + 
buffer = append(buffer, char) + } + } + + populateType(&volume) + return volume, nil +} + +func isWindowsDrive(buffer []rune, char rune) bool { + return char == ':' && len(buffer) == 1 && unicode.IsLetter(buffer[0]) +} + +func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolumeConfig) error { + strBuffer := string(buffer) + switch { + case len(buffer) == 0: + return errors.New("empty section between colons") + // Anonymous volume + case volume.Source == "" && char == endOfSpec: + volume.Target = strBuffer + return nil + case volume.Source == "": + volume.Source = strBuffer + return nil + case volume.Target == "": + volume.Target = strBuffer + return nil + case char == ':': + return errors.New("too many colons") + } + for _, option := range strings.Split(strBuffer, ",") { + switch option { + case "ro": + volume.ReadOnly = true + case "rw": + volume.ReadOnly = false + case "nocopy": + volume.Volume = &types.ServiceVolumeVolume{NoCopy: true} + default: + if isBindOption(option) { + setBindOption(volume, option) + } + // ignore unknown options FIXME why not report an error here? 
+ } + } + return nil +} + +var Propagations = []string{ + types.PropagationRPrivate, + types.PropagationPrivate, + types.PropagationRShared, + types.PropagationShared, + types.PropagationRSlave, + types.PropagationSlave, +} + +type setBindOptionFunc func(bind *types.ServiceVolumeBind, option string) + +var bindOptions = map[string]setBindOptionFunc{ + types.PropagationRPrivate: setBindPropagation, + types.PropagationPrivate: setBindPropagation, + types.PropagationRShared: setBindPropagation, + types.PropagationShared: setBindPropagation, + types.PropagationRSlave: setBindPropagation, + types.PropagationSlave: setBindPropagation, + types.SELinuxShared: setBindSELinux, + types.SELinuxPrivate: setBindSELinux, +} + +func setBindPropagation(bind *types.ServiceVolumeBind, option string) { + bind.Propagation = option +} + +func setBindSELinux(bind *types.ServiceVolumeBind, option string) { + bind.SELinux = option +} + +func isBindOption(option string) bool { + _, ok := bindOptions[option] + + return ok +} + +func setBindOption(volume *types.ServiceVolumeConfig, option string) { + if volume.Bind == nil { + volume.Bind = &types.ServiceVolumeBind{} + } + + bindOptions[option](volume.Bind, option) +} + +func populateType(volume *types.ServiceVolumeConfig) { + if isFilePath(volume.Source) { + volume.Type = types.VolumeTypeBind + if volume.Bind == nil { + volume.Bind = &types.ServiceVolumeBind{} + } + // For backward compatibility with docker-compose legacy, using short notation involves + // bind will create missing host path + volume.Bind.CreateHostPath = true + } else { + volume.Type = types.VolumeTypeVolume + if volume.Volume == nil { + volume.Volume = &types.ServiceVolumeVolume{} + } + } +} + +func isFilePath(source string) bool { + if source == "" { + return false + } + switch source[0] { + case '.', '/', '~': + return true + } + + // windows named pipes + if strings.HasPrefix(source, `\\`) { + return true + } + + first, nextIndex := utf8.DecodeRuneInString(source) + if 
len(source) <= nextIndex { + return false + } + return isWindowsDrive([]rune{first}, rune(source[nextIndex])) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/graph/cycle.go b/vendor/github.com/compose-spec/compose-go/v2/graph/cycle.go new file mode 100644 index 00000000..58130ad6 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/graph/cycle.go @@ -0,0 +1,63 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package graph + +import ( + "fmt" + "slices" + "strings" + + "github.com/compose-spec/compose-go/v2/types" + "github.com/compose-spec/compose-go/v2/utils" +) + +// CheckCycle analyze project's depends_on relation and report an error on cycle detection +func CheckCycle(project *types.Project) error { + g, err := newGraph(project) + if err != nil { + return err + } + return g.checkCycle() +} + +func (g *graph[T]) checkCycle() error { + // iterate on vertices in a name-order to render a predicable error message + // this is required by tests and enforce command reproducibility by user, which otherwise could be confusing + names := utils.MapKeys(g.vertices) + for _, name := range names { + err := searchCycle([]string{name}, g.vertices[name]) + if err != nil { + return err + } + } + return nil +} + +func searchCycle[T any](path []string, v *vertex[T]) error { + names := utils.MapKeys(v.children) + for _, name := range names { + if i := slices.Index(path, name); i >= 0 { + return 
fmt.Errorf("dependency cycle detected: %s -> %s", strings.Join(path[i:], " -> "), name) + } + ch := v.children[name] + err := searchCycle(append(path, name), ch) + if err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go b/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go new file mode 100644 index 00000000..de4e9e10 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/graph/graph.go @@ -0,0 +1,75 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package graph + +// graph represents project as service dependencies +type graph[T any] struct { + vertices map[string]*vertex[T] +} + +// vertex represents a service in the dependencies structure +type vertex[T any] struct { + key string + service *T + children map[string]*vertex[T] + parents map[string]*vertex[T] +} + +func (g *graph[T]) addVertex(name string, service T) { + g.vertices[name] = &vertex[T]{ + key: name, + service: &service, + parents: map[string]*vertex[T]{}, + children: map[string]*vertex[T]{}, + } +} + +func (g *graph[T]) addEdge(src, dest string) { + g.vertices[src].children[dest] = g.vertices[dest] + g.vertices[dest].parents[src] = g.vertices[src] +} + +func (g *graph[T]) roots() []*vertex[T] { + var res []*vertex[T] + for _, v := range g.vertices { + if len(v.parents) == 0 { + res = append(res, v) + } + } + return res +} + +func (g *graph[T]) leaves() []*vertex[T] { + var res []*vertex[T] + for _, v := range g.vertices { + if len(v.children) == 0 { + res = append(res, v) + } + } + + return res +} + +// descendents return all descendents for a vertex, might contain duplicates +func (v *vertex[T]) descendents() []string { + var vx []string + for _, n := range v.children { + vx = append(vx, n.key) + vx = append(vx, n.descendents()...) + } + return vx +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/graph/services.go b/vendor/github.com/compose-spec/compose-go/v2/graph/services.go new file mode 100644 index 00000000..44b36a3f --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/graph/services.go @@ -0,0 +1,80 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package graph + +import ( + "context" + "fmt" + + "github.com/compose-spec/compose-go/v2/types" +) + +// InDependencyOrder walk the service graph an invoke VisitorFn in respect to dependency order +func InDependencyOrder(ctx context.Context, project *types.Project, fn VisitorFn[types.ServiceConfig], options ...func(*Options)) error { + _, err := CollectInDependencyOrder[any](ctx, project, func(ctx context.Context, s string, config types.ServiceConfig) (any, error) { + return nil, fn(ctx, s, config) + }, options...) + return err +} + +// CollectInDependencyOrder walk the service graph an invoke CollectorFn in respect to dependency order, then return result for each call +func CollectInDependencyOrder[T any](ctx context.Context, project *types.Project, fn CollectorFn[types.ServiceConfig, T], options ...func(*Options)) (map[string]T, error) { + graph, err := newGraph(project) + if err != nil { + return nil, err + } + t := newTraversal(fn) + for _, option := range options { + option(t.Options) + } + err = walk(ctx, graph, t) + return t.results, err +} + +// newGraph creates a service graph from project +func newGraph(project *types.Project) (*graph[types.ServiceConfig], error) { + g := &graph[types.ServiceConfig]{ + vertices: map[string]*vertex[types.ServiceConfig]{}, + } + + for name, s := range project.Services { + g.addVertex(name, s) + } + + for name, s := range project.Services { + src := g.vertices[name] + for dep, condition := range s.DependsOn { + dest, ok := g.vertices[dep] + if !ok { + if condition.Required { + if ds, exists := 
project.DisabledServices[dep]; exists { + return nil, fmt.Errorf("service %q is required by %q but is disabled. Can be enabled by profiles %s", dep, name, ds.Profiles) + } + return nil, fmt.Errorf("service %q depends on unknown service %q", name, dep) + } + delete(s.DependsOn, name) + project.Services[name] = s + continue + } + src.children[dep] = dest + dest.parents[name] = src + } + } + + err := g.checkCycle() + return g, err +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go b/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go new file mode 100644 index 00000000..99ab4a4b --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/graph/traversal.go @@ -0,0 +1,211 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package graph + +import ( + "context" + "slices" + "sync" + + "golang.org/x/sync/errgroup" +) + +// CollectorFn executes on each graph vertex based on visit order and return associated value +type CollectorFn[S any, T any] func(context.Context, string, S) (T, error) + +// VisitorFn executes on each graph nodes based on visit order +type VisitorFn[S any] func(context.Context, string, S) error + +type traversal[S any, T any] struct { + *Options + visitor CollectorFn[S, T] + + mu sync.Mutex + status map[string]int + results map[string]T +} + +type Options struct { + // inverse reverse the traversal direction + inverse bool + // maxConcurrency limit the concurrent execution of visitorFn while walking the graph + maxConcurrency int + // after marks a set of node as starting points walking the graph + after []string +} + +const ( + vertexEntered = iota + vertexVisited +) + +func newTraversal[S, T any](fn CollectorFn[S, T]) *traversal[S, T] { + return &traversal[S, T]{ + Options: &Options{}, + status: map[string]int{}, + results: map[string]T{}, + visitor: fn, + } +} + +// WithMaxConcurrency configure traversal to limit concurrency walking graph nodes +func WithMaxConcurrency(concurrency int) func(*Options) { + return func(o *Options) { + o.maxConcurrency = concurrency + } +} + +// InReverseOrder configure traversal to walk the graph in reverse dependency order +func InReverseOrder(o *Options) { + o.inverse = true +} + +// WithRootNodesAndDown creates a graphTraversal to start from selected nodes +func WithRootNodesAndDown(nodes []string) func(*Options) { + return func(o *Options) { + o.after = nodes + } +} + +func walk[S, T any](ctx context.Context, g *graph[S], t *traversal[S, T]) error { + expect := len(g.vertices) + if expect == 0 { + return nil + } + // nodeCh need to allow n=expect writers while reader goroutine could have returned after ctx.Done + nodeCh := make(chan *vertex[S], expect) + defer close(nodeCh) + + eg, ctx := errgroup.WithContext(ctx) + if 
t.maxConcurrency > 0 { + eg.SetLimit(t.maxConcurrency + 1) + } + + eg.Go(func() error { + for { + select { + case <-ctx.Done(): + return nil + case node := <-nodeCh: + expect-- + if expect == 0 { + return nil + } + + for _, adj := range t.adjacentNodes(node) { + t.visit(ctx, eg, adj, nodeCh) + } + } + } + }) + + // select nodes to start walking the graph based on traversal.direction + for _, node := range t.extremityNodes(g) { + t.visit(ctx, eg, node, nodeCh) + } + + return eg.Wait() +} + +func (t *traversal[S, T]) visit(ctx context.Context, eg *errgroup.Group, node *vertex[S], nodeCh chan *vertex[S]) { + if !t.ready(node) { + // don't visit this service yet as dependencies haven't been visited + return + } + if !t.enter(node) { + // another worker already acquired this node + return + } + eg.Go(func() error { + var ( + err error + result T + ) + if !t.skip(node) { + result, err = t.visitor(ctx, node.key, *node.service) + } + t.done(node, result) + nodeCh <- node + return err + }) +} + +func (t *traversal[S, T]) extremityNodes(g *graph[S]) []*vertex[S] { + if t.inverse { + return g.roots() + } + return g.leaves() +} + +func (t *traversal[S, T]) adjacentNodes(v *vertex[S]) map[string]*vertex[S] { + if t.inverse { + return v.children + } + return v.parents +} + +func (t *traversal[S, T]) ready(v *vertex[S]) bool { + t.mu.Lock() + defer t.mu.Unlock() + + depends := v.children + if t.inverse { + depends = v.parents + } + for name := range depends { + if t.status[name] != vertexVisited { + return false + } + } + return true +} + +func (t *traversal[S, T]) enter(v *vertex[S]) bool { + t.mu.Lock() + defer t.mu.Unlock() + + if _, ok := t.status[v.key]; ok { + return false + } + t.status[v.key] = vertexEntered + return true +} + +func (t *traversal[S, T]) done(v *vertex[S], result T) { + t.mu.Lock() + defer t.mu.Unlock() + t.status[v.key] = vertexVisited + t.results[v.key] = result +} + +func (t *traversal[S, T]) skip(node *vertex[S]) bool { + if len(t.after) == 0 { + 
return false
+ }
+ if slices.Contains(t.after, node.key) {
+ return false
+ }
+
+ // if none of our starting nodes is a descendant of this node, skip the visit
+ ancestors := node.descendents()
+ for _, name := range t.after {
+ if slices.Contains(ancestors, name) {
+ return false
+ }
+ }
+ return true
+} diff --git a/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go b/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go new file mode 100644 index 00000000..b56e0afe --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/interpolation/interpolation.go @@ -0,0 +1,137 @@ +/*
+ Copyright 2020 The Compose Specification Authors.
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
+*/
+
+package interpolation
+
+import (
+ "errors"
+ "fmt"
+ "os"
+
+ "github.com/compose-spec/compose-go/v2/template"
+ "github.com/compose-spec/compose-go/v2/tree"
+)
+
+// Options supported by Interpolate
+type Options struct {
+ // LookupValue from a key
+ LookupValue LookupValue
+ // TypeCastMapping maps key paths to functions to cast to a type
+ TypeCastMapping map[tree.Path]Cast
+ // Substitution function to use
+ Substitute func(string, template.Mapping) (string, error)
+}
+
+// LookupValue is a function which maps from variable names to values.
+// Returns the value as a string and a bool indicating whether
+// the value is present, to distinguish between an empty string
+// and the absence of a value.
+type LookupValue func(key string) (string, bool) + +// Cast a value to a new type, or return an error if the value can't be cast +type Cast func(value string) (interface{}, error) + +// Interpolate replaces variables in a string with the values from a mapping +func Interpolate(config map[string]interface{}, opts Options) (map[string]interface{}, error) { + if opts.LookupValue == nil { + opts.LookupValue = os.LookupEnv + } + if opts.TypeCastMapping == nil { + opts.TypeCastMapping = make(map[tree.Path]Cast) + } + if opts.Substitute == nil { + opts.Substitute = template.Substitute + } + + out := map[string]interface{}{} + + for key, value := range config { + interpolatedValue, err := recursiveInterpolate(value, tree.NewPath(key), opts) + if err != nil { + return out, err + } + out[key] = interpolatedValue + } + + return out, nil +} + +func recursiveInterpolate(value interface{}, path tree.Path, opts Options) (interface{}, error) { + switch value := value.(type) { + case string: + newValue, err := opts.Substitute(value, template.Mapping(opts.LookupValue)) + if err != nil { + return value, newPathError(path, err) + } + caster, ok := opts.getCasterForPath(path) + if !ok { + return newValue, nil + } + casted, err := caster(newValue) + if err != nil { + return casted, newPathError(path, fmt.Errorf("failed to cast to expected type: %w", err)) + } + return casted, nil + + case map[string]interface{}: + out := map[string]interface{}{} + for key, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, path.Next(key), opts) + if err != nil { + return nil, err + } + out[key] = interpolatedElem + } + return out, nil + + case []interface{}: + out := make([]interface{}, len(value)) + for i, elem := range value { + interpolatedElem, err := recursiveInterpolate(elem, path.Next(tree.PathMatchList), opts) + if err != nil { + return nil, err + } + out[i] = interpolatedElem + } + return out, nil + + default: + return value, nil + } +} + +func newPathError(path 
tree.Path, err error) error { + var ite *template.InvalidTemplateError + switch { + case err == nil: + return nil + case errors.As(err, &ite): + return fmt.Errorf( + "invalid interpolation format for %s.\nYou may need to escape any $ with another $.\n%s", + path, ite.Template) + default: + return fmt.Errorf("error while interpolating %s: %w", path, err) + } +} + +func (o Options) getCasterForPath(path tree.Path) (Cast, bool) { + for pattern, caster := range o.TypeCastMapping { + if path.Matches(pattern) { + return caster, true + } + } + return nil, false +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/environment.go b/vendor/github.com/compose-spec/compose-go/v2/loader/environment.go new file mode 100644 index 00000000..3f7277b8 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/environment.go @@ -0,0 +1,110 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/types" +) + +// ResolveEnvironment update the environment variables for the format {- VAR} (without interpolation) +func ResolveEnvironment(dict map[string]any, environment types.Mapping) { + resolveServicesEnvironment(dict, environment) + resolveSecretsEnvironment(dict, environment) + resolveConfigsEnvironment(dict, environment) +} + +func resolveServicesEnvironment(dict map[string]any, environment types.Mapping) { + services, ok := dict["services"].(map[string]any) + if !ok { + return + } + + for service, cfg := range services { + serviceConfig, ok := cfg.(map[string]any) + if !ok { + continue + } + serviceEnv, ok := serviceConfig["environment"].([]any) + if !ok { + continue + } + envs := []any{} + for _, env := range serviceEnv { + varEnv, ok := env.(string) + if !ok { + continue + } + if found, ok := environment[varEnv]; ok { + envs = append(envs, fmt.Sprintf("%s=%s", varEnv, found)) + } else { + // either does not exist or it was already resolved in interpolation + envs = append(envs, varEnv) + } + } + serviceConfig["environment"] = envs + services[service] = serviceConfig + } + dict["services"] = services +} + +func resolveSecretsEnvironment(dict map[string]any, environment types.Mapping) { + secrets, ok := dict["secrets"].(map[string]any) + if !ok { + return + } + + for name, cfg := range secrets { + secret, ok := cfg.(map[string]any) + if !ok { + continue + } + env, ok := secret["environment"].(string) + if !ok { + continue + } + if found, ok := environment[env]; ok { + secret[types.SecretConfigXValue] = found + } + secrets[name] = secret + } + dict["secrets"] = secrets +} + +func resolveConfigsEnvironment(dict map[string]any, environment types.Mapping) { + configs, ok := dict["configs"].(map[string]any) + if !ok { + return + } + + for name, cfg := range configs { + config, ok := cfg.(map[string]any) + if !ok { + continue + } + env, ok := config["environment"].(string) + if 
!ok { + continue + } + if found, ok := environment[env]; ok { + config["content"] = found + } + configs[name] = config + } + dict["configs"] = configs +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env b/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env new file mode 100644 index 00000000..61716e93 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/example1.env @@ -0,0 +1,10 @@ +# passed through +FOO=foo_from_env_file +ENV.WITH.DOT=ok +ENV_WITH_UNDERSCORE=ok + +# overridden in example2.env +BAR=bar_from_env_file + +# overridden in full-example.yml +BAZ=baz_from_env_file diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/example1.label b/vendor/github.com/compose-spec/compose-go/v2/loader/example1.label new file mode 100644 index 00000000..27d43cff --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/example1.label @@ -0,0 +1,10 @@ +# passed through +FOO=foo_from_label_file +LABEL.WITH.DOT=ok +LABEL_WITH_UNDERSCORE=ok + +# overridden in example2.label +BAR=bar_from_label_file + +# overridden in full-example.yml +BAZ=baz_from_label_file diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env b/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env new file mode 100644 index 00000000..f47d1e61 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/example2.env @@ -0,0 +1,4 @@ +BAR=bar_from_env_file_2 + +# overridden in configDetails.Environment +QUX=quz_from_env_file_2 diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/example2.label b/vendor/github.com/compose-spec/compose-go/v2/loader/example2.label new file mode 100644 index 00000000..aa667c30 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/example2.label @@ -0,0 +1,4 @@ +BAR=bar_from_label_file_2 + +# overridden in configDetails.Labels +QUX=quz_from_label_file_2 diff --git 
a/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go b/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go new file mode 100644 index 00000000..d85e84ba --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/extends.go @@ -0,0 +1,221 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "context" + "fmt" + "path/filepath" + + "github.com/compose-spec/compose-go/v2/consts" + "github.com/compose-spec/compose-go/v2/override" + "github.com/compose-spec/compose-go/v2/paths" + "github.com/compose-spec/compose-go/v2/types" +) + +func ApplyExtends(ctx context.Context, dict map[string]any, opts *Options, tracker *cycleTracker, post PostProcessor) error { + a, ok := dict["services"] + if !ok { + return nil + } + services, ok := a.(map[string]any) + if !ok { + return fmt.Errorf("services must be a mapping") + } + for name := range services { + merged, err := applyServiceExtends(ctx, name, services, opts, tracker, post) + if err != nil { + return err + } + services[name] = merged + } + dict["services"] = services + return nil +} + +func applyServiceExtends(ctx context.Context, name string, services map[string]any, opts *Options, tracker *cycleTracker, post PostProcessor) (any, error) { + s := services[name] + if s == nil { + return nil, nil + } + service, ok := s.(map[string]any) + if !ok { + return nil, fmt.Errorf("services.%s must be a mapping", name) + } + 
extends, ok := service["extends"] + if !ok { + return s, nil + } + filename := ctx.Value(consts.ComposeFileKey{}).(string) + var ( + err error + ref string + file any + ) + switch v := extends.(type) { + case map[string]any: + ref, ok = v["service"].(string) + if !ok { + return nil, fmt.Errorf("extends.%s.service is required", name) + } + file = v["file"] + opts.ProcessEvent("extends", v) + case string: + ref = v + opts.ProcessEvent("extends", map[string]any{"service": ref}) + } + + var ( + base any + processor = post + ) + + if file != nil { + refFilename := file.(string) + services, processor, err = getExtendsBaseFromFile(ctx, name, ref, filename, refFilename, opts, tracker) + if err != nil { + return nil, err + } + filename = refFilename + } else { + _, ok := services[ref] + if !ok { + return nil, fmt.Errorf("cannot extend service %q in %s: service %q not found", name, filename, ref) + } + } + + tracker, err = tracker.Add(filename, name) + if err != nil { + return nil, err + } + + // recursively apply `extends` + base, err = applyServiceExtends(ctx, ref, services, opts, tracker, processor) + if err != nil { + return nil, err + } + + if base == nil { + return service, nil + } + source := deepClone(base).(map[string]any) + + err = post.Apply(map[string]any{ + "services": map[string]any{ + name: source, + }, + }) + if err != nil { + return nil, err + } + + merged, err := override.ExtendService(source, service) + if err != nil { + return nil, err + } + + delete(merged, "extends") + services[name] = merged + return merged, nil +} + +func getExtendsBaseFromFile( + ctx context.Context, + name, ref string, + path, refPath string, + opts *Options, + ct *cycleTracker, +) (map[string]any, PostProcessor, error) { + for _, loader := range opts.ResourceLoaders { + if !loader.Accept(refPath) { + continue + } + local, err := loader.Load(ctx, refPath) + if err != nil { + return nil, nil, err + } + localdir := filepath.Dir(local) + relworkingdir := loader.Dir(refPath) + + 
extendsOpts := opts.clone() + // replace localResourceLoader with a new flavour, using extended file base path + extendsOpts.ResourceLoaders = append(opts.RemoteResourceLoaders(), localResourceLoader{ + WorkingDir: localdir, + }) + extendsOpts.ResolvePaths = false // we do relative path resolution after file has been loaded + extendsOpts.SkipNormalization = true + extendsOpts.SkipConsistencyCheck = true + extendsOpts.SkipInclude = true + extendsOpts.SkipExtends = true // we manage extends recursively based on raw service definition + extendsOpts.SkipValidation = true // we validate the merge result + extendsOpts.SkipDefaultValues = true + source, processor, err := loadYamlFile(ctx, types.ConfigFile{Filename: local}, + extendsOpts, relworkingdir, nil, ct, map[string]any{}, nil) + if err != nil { + return nil, nil, err + } + m, ok := source["services"] + if !ok { + return nil, nil, fmt.Errorf("cannot extend service %q in %s: no services section", name, local) + } + services, ok := m.(map[string]any) + if !ok { + return nil, nil, fmt.Errorf("cannot extend service %q in %s: services must be a mapping", name, local) + } + _, ok = services[ref] + if !ok { + return nil, nil, fmt.Errorf( + "cannot extend service %q in %s: service %q not found in %s", + name, + path, + ref, + refPath, + ) + } + + var remotes []paths.RemoteResource + for _, loader := range opts.RemoteResourceLoaders() { + remotes = append(remotes, loader.Accept) + } + err = paths.ResolveRelativePaths(source, relworkingdir, remotes) + if err != nil { + return nil, nil, err + } + + return services, processor, nil + } + return nil, nil, fmt.Errorf("cannot read %s", refPath) +} + +func deepClone(value any) any { + switch v := value.(type) { + case []any: + cp := make([]any, len(v)) + for i, e := range v { + cp[i] = deepClone(e) + } + return cp + case map[string]any: + cp := make(map[string]any, len(v)) + for k, e := range v { + cp[k] = deepClone(e) + } + return cp + default: + return value + } +} diff --git 
a/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go b/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go new file mode 100644 index 00000000..7a6e88d8 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/fix.go @@ -0,0 +1,36 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +// fixEmptyNotNull is a workaround for https://github.com/xeipuuv/gojsonschema/issues/141 +// as go-yaml `[]` will load as a `[]any(nil)`, which is not the same as an empty array +func fixEmptyNotNull(value any) interface{} { + switch v := value.(type) { + case []any: + if v == nil { + return []any{} + } + for i, e := range v { + v[i] = fixEmptyNotNull(e) + } + case map[string]any: + for k, e := range v { + v[k] = fixEmptyNotNull(e) + } + } + return value +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml b/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml new file mode 100644 index 00000000..944b2d47 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/full-example.yml @@ -0,0 +1,461 @@ +name: full_example_project_name +services: + + bar: + build: + dockerfile_inline: | + FROM alpine + RUN echo "hello" > /world.txt + + foo: + annotations: + - com.example.foo=bar + build: + context: ./dir + dockerfile: Dockerfile + args: + foo: bar + ssh: + - default + target: foo + network: foo + cache_from: + - foo + - bar + labels: 
[FOO=BAR] + additional_contexts: + foo: ./bar + secrets: + - source: secret1 + target: /run/secrets/secret1 + - source: secret2 + target: my_secret + uid: '103' + gid: '103' + mode: 0440 + tags: + - foo:v1.0.0 + - docker.io/username/foo:my-other-tag + - ${COMPOSE_PROJECT_NAME}:1.0.0 + platforms: + - linux/amd64 + - linux/arm64 + + + cap_add: + - ALL + + cap_drop: + - NET_ADMIN + - SYS_ADMIN + + cgroup_parent: m-executor-abcd + + # String or list + command: bundle exec thin -p 3000 + # command: ["bundle", "exec", "thin", "-p", "3000"] + + configs: + - config1 + - source: config2 + target: /my_config + uid: '103' + gid: '103' + mode: 0440 + + container_name: my-web-container + + depends_on: + - db + - redis + + deploy: + mode: replicated + replicas: 6 + labels: [FOO=BAR] + rollback_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + order: start-first + update_config: + parallelism: 3 + delay: 10s + failure_action: continue + monitor: 60s + max_failure_ratio: 0.3 + order: start-first + resources: + limits: + cpus: '0.001' + memory: 50M + reservations: + cpus: '0.0001' + memory: 20M + generic_resources: + - discrete_resource_spec: + kind: 'gpu' + value: 2 + - discrete_resource_spec: + kind: 'ssd' + value: 1 + restart_policy: + condition: on-failure + delay: 5s + max_attempts: 3 + window: 120s + placement: + constraints: [node=foo] + max_replicas_per_node: 5 + preferences: + - spread: node.labels.az + endpoint_mode: dnsrr + + device_cgroup_rules: + - "c 1:3 mr" + - "a 7:* rmw" + + devices: + - source: /dev/ttyUSB0 + target: /dev/ttyUSB0 + permissions: rwm + + # String or list + # dns: 8.8.8.8 + dns: + - 8.8.8.8 + - 9.9.9.9 + + # String or list + # dns_search: example.com + dns_search: + - dc1.example.com + - dc2.example.com + + domainname: foo.com + + # String or list + # entrypoint: /code/entrypoint.sh -p 3000 + entrypoint: ["/code/entrypoint.sh", "-p", "3000"] + + # String or list + # env_file: .env + env_file: + 
- ./example1.env + - path: ./example2.env + required: false + + # Mapping or list + # Mapping values can be strings, numbers or null + # Booleans are not allowed - must be quoted + environment: + BAZ: baz_from_service_def + QUX: + # environment: + # - RACK_ENV=development + # - SHOW=true + # - SESSION_SECRET + + # Items can be strings or numbers + expose: + - "3000" + - 8000 + + external_links: + - redis_1 + - project_db_1:mysql + - project_db_1:postgresql + + # Mapping or list + # Mapping values must be strings + # extra_hosts: + # somehost: "162.242.195.82" + # otherhost: "50.31.209.229" + extra_hosts: + - "otherhost:50.31.209.229" + - "somehost:162.242.195.82" + + hostname: foo + + healthcheck: + test: echo "hello world" + interval: 10s + timeout: 1s + retries: 5 + start_period: 15s + start_interval: 5s + + # Any valid image reference - repo, tag, id, sha + image: redis + # image: ubuntu:14.04 + # image: tutum/influxdb + # image: example-registry.com:4000/postgresql + # image: a4bc65fd + # image: busybox@sha256:38a203e1986cf79639cfb9b2e1d6e773de84002feea2d4eb006b52004ee8502d + + ipc: host + + uts: host + + # Mapping or list + # Mapping values can be strings, numbers or null + labels: + com.example.description: "Accounting webapp" + com.example.number: 42 + com.example.empty-label: + # labels: + # - "com.example.description=Accounting webapp" + # - "com.example.number=42" + # - "com.example.empty-label" + + label_file: + - ./example1.label + - ./example2.label + + links: + - db + - db:database + - redis + + logging: + driver: syslog + options: + syslog-address: "tcp://192.168.0.42:123" + + mac_address: 02:42:ac:11:65:43 + + # network_mode: "bridge" + # network_mode: "host" + # network_mode: "none" + # Use the network mode of an arbitrary container from another service + # network_mode: "service:db" + # Use the network mode of another container, specified by name or id + # network_mode: "container:some-container" + network_mode: 
"container:0cfeab0f748b9a743dc3da582046357c6ef497631c1a016d28d2bf9b4f899f7b" + + networks: + some-network: + aliases: + - alias1 + - alias3 + other-network: + ipv4_address: 172.16.238.10 + ipv6_address: 2001:3984:3989::10 + mac_address: 02:42:72:98:65:08 + other-other-network: + + pid: "host" + + ports: + - 3000 + - "3001-3005" + - "8000:8000" + - "9090-9091:8080-8081" + - "49100:22" + - "127.0.0.1:8001:8001" + - "127.0.0.1:5000-5010:5000-5010" + + privileged: true + + read_only: true + + restart: always + + secrets: + - source: secret1 + target: /run/secrets/secret1 + - source: secret2 + target: my_secret + uid: '103' + gid: '103' + mode: 0440 + + security_opt: + - label=level:s0:c100,c200 + - label=type:svirt_apache_t + + stdin_open: true + + stop_grace_period: 20s + + stop_signal: SIGUSR1 + storage_opt: + size: "20G" + sysctls: + net.core.somaxconn: 1024 + net.ipv4.tcp_syncookies: 0 + + # String or list + # tmpfs: /run + tmpfs: + - /run + - /tmp + + tty: true + + ulimits: + # Single number or mapping with soft + hard limits + nproc: 65535 + nofile: + soft: 20000 + hard: 40000 + + user: someone + + volumes: + # Just specify a path and let the Engine create a volume + - /var/lib/anonymous + # Specify an absolute path mapping + - /opt/data:/var/lib/data + # Path on the host, relative to the Compose file + - .:/code + - ./static:/var/www/html + # User-relative path + - ~/configs:/etc/configs:ro + # Named volume + - datavolume:/var/lib/volume + - type: bind + source: ./opt + target: /opt/cached + consistency: cached + - type: tmpfs + target: /opt/tmpfs + tmpfs: + size: 10000 + + working_dir: /code + x-bar: baz + x-foo: bar + +networks: + # Entries can be null, which specifies simply that a network + # called "{project name}_some-network" should be created and + # use the default driver + some-network: + + other-network: + driver: overlay + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + ipam: + driver: overlay + # driver_opts: + # # 
Values can be strings or numbers + # com.docker.network.enable_ipv6: "true" + # com.docker.network.numeric_value: 1 + config: + - subnet: 172.28.0.0/16 + ip_range: 172.28.5.0/24 + gateway: 172.28.5.254 + aux_addresses: + host1: 172.28.1.5 + host2: 172.28.1.6 + host3: 172.28.1.7 + - subnet: 2001:3984:3989::/64 + gateway: 2001:3984:3989::1 + + labels: + foo: bar + + external-network: + # Specifies that a pre-existing network called "external-network" + # can be referred to within this file as "external-network" + external: true + + other-external-network: + # Specifies that a pre-existing network called "my-cool-network" + # can be referred to within this file as "other-external-network" + external: + name: my-cool-network + x-bar: baz + x-foo: bar + +volumes: + # Entries can be null, which specifies simply that a volume + # called "{project name}_some-volume" should be created and + # use the default driver + some-volume: + + other-volume: + driver: flocker + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + labels: + foo: bar + + another-volume: + name: "user_specified_name" + driver: vsphere + + driver_opts: + # Values can be strings or numbers + foo: "bar" + baz: 1 + + external-volume: + # Specifies that a pre-existing volume called "external-volume" + # can be referred to within this file as "external-volume" + external: true + + other-external-volume: + # Specifies that a pre-existing volume called "my-cool-volume" + # can be referred to within this file as "other-external-volume" + # This example uses the deprecated "volume.external.name" (replaced by "volume.name") + external: + name: my-cool-volume + + external-volume3: + # Specifies that a pre-existing volume called "this-is-volume3" + # can be referred to within this file as "external-volume3" + name: this-is-volume3 + external: true + x-bar: baz + x-foo: bar + +configs: + config1: + file: ./config_data + labels: + foo: bar + config2: + external: + name: my_config + config3: + 
external: true + config4: + name: foo + file: ~/config_data + x-bar: baz + x-foo: bar + +secrets: + secret1: + file: ./secret_data + labels: + foo: bar + secret2: + external: + name: my_secret + secret3: + external: true + secret4: + name: bar + environment: BAR + x-bar: baz + x-foo: bar + secret5: + file: /abs/secret_data +x-bar: baz +x-foo: bar +x-nested: + bar: baz + foo: bar diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/include.go b/vendor/github.com/compose-spec/compose-go/v2/loader/include.go new file mode 100644 index 00000000..ff310447 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/include.go @@ -0,0 +1,223 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "context" + "fmt" + "os" + "path/filepath" + "strings" + + "github.com/compose-spec/compose-go/v2/dotenv" + interp "github.com/compose-spec/compose-go/v2/interpolation" + "github.com/compose-spec/compose-go/v2/override" + "github.com/compose-spec/compose-go/v2/tree" + "github.com/compose-spec/compose-go/v2/types" +) + +// loadIncludeConfig parse the required config from raw yaml +func loadIncludeConfig(source any) ([]types.IncludeConfig, error) { + if source == nil { + return nil, nil + } + configs, ok := source.([]any) + if !ok { + return nil, fmt.Errorf("`include` must be a list, got %s", source) + } + for i, config := range configs { + if v, ok := config.(string); ok { + configs[i] = map[string]any{ + "path": v, + } + } + } + var requires []types.IncludeConfig + err := Transform(source, &requires) + return requires, err +} + +func ApplyInclude(ctx context.Context, workingDir string, environment types.Mapping, model map[string]any, options *Options, included []string, processor PostProcessor) error { + includeConfig, err := loadIncludeConfig(model["include"]) + if err != nil { + return err + } + + for _, r := range includeConfig { + for _, listener := range options.Listeners { + listener("include", map[string]any{ + "path": r.Path, + "workingdir": workingDir, + }) + } + + var relworkingdir string + for i, p := range r.Path { + for _, loader := range options.ResourceLoaders { + if !loader.Accept(p) { + continue + } + path, err := loader.Load(ctx, p) + if err != nil { + return err + } + p = path + + if i == 0 { // This is the "main" file, used to define project-directory. 
Others are overrides + + switch { + case r.ProjectDirectory == "": + relworkingdir = loader.Dir(path) + r.ProjectDirectory = filepath.Dir(path) + case !filepath.IsAbs(r.ProjectDirectory): + relworkingdir = loader.Dir(r.ProjectDirectory) + r.ProjectDirectory = filepath.Join(workingDir, r.ProjectDirectory) + + default: + relworkingdir = r.ProjectDirectory + + } + for _, f := range included { + if f == path { + included = append(included, path) + return fmt.Errorf("include cycle detected:\n%s\n include %s", included[0], strings.Join(included[1:], "\n include ")) + } + } + } + } + r.Path[i] = p + } + + loadOptions := options.clone() + loadOptions.ResolvePaths = true + loadOptions.SkipNormalization = true + loadOptions.SkipConsistencyCheck = true + loadOptions.ResourceLoaders = append(loadOptions.RemoteResourceLoaders(), localResourceLoader{ + WorkingDir: r.ProjectDirectory, + }) + + if len(r.EnvFile) == 0 { + f := filepath.Join(r.ProjectDirectory, ".env") + if s, err := os.Stat(f); err == nil && !s.IsDir() { + r.EnvFile = types.StringList{f} + } + } else { + envFile := []string{} + for _, f := range r.EnvFile { + if f == "/dev/null" { + continue + } + if !filepath.IsAbs(f) { + f = filepath.Join(workingDir, f) + s, err := os.Stat(f) + if err != nil { + return err + } + if s.IsDir() { + return fmt.Errorf("%s is not a file", f) + } + } + envFile = append(envFile, f) + } + r.EnvFile = envFile + } + + envFromFile, err := dotenv.GetEnvFromFile(environment, r.EnvFile) + if err != nil { + return err + } + + config := types.ConfigDetails{ + WorkingDir: relworkingdir, + ConfigFiles: types.ToConfigFiles(r.Path), + Environment: environment.Clone().Merge(envFromFile), + } + loadOptions.Interpolate = &interp.Options{ + Substitute: options.Interpolate.Substitute, + LookupValue: config.LookupEnv, + TypeCastMapping: options.Interpolate.TypeCastMapping, + } + imported, err := loadYamlModel(ctx, config, loadOptions, &cycleTracker{}, included) + if err != nil { + return err + } + err = 
importResources(imported, model, processor) + if err != nil { + return err + } + } + delete(model, "include") + return nil +} + +// importResources import into model all resources defined by imported, and report error on conflict +func importResources(source map[string]any, target map[string]any, processor PostProcessor) error { + if err := importResource(source, target, "services", processor); err != nil { + return err + } + if err := importResource(source, target, "volumes", processor); err != nil { + return err + } + if err := importResource(source, target, "networks", processor); err != nil { + return err + } + if err := importResource(source, target, "secrets", processor); err != nil { + return err + } + if err := importResource(source, target, "configs", processor); err != nil { + return err + } + if err := importResource(source, target, "models", processor); err != nil { + return err + } + return nil +} + +func importResource(source map[string]any, target map[string]any, key string, processor PostProcessor) error { + from := source[key] + if from != nil { + var to map[string]any + if v, ok := target[key]; ok { + to = v.(map[string]any) + } else { + to = map[string]any{} + } + for name, a := range from.(map[string]any) { + conflict, ok := to[name] + if !ok { + to[name] = a + continue + } + err := processor.Apply(map[string]any{ + key: map[string]any{ + name: a, + }, + }) + if err != nil { + return err + } + + merged, err := override.MergeYaml(a, conflict, tree.NewPath(key, name)) + if err != nil { + return err + } + to[name] = merged + } + target[key] = to + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go b/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go new file mode 100644 index 00000000..dc8dc735 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/interpolate.go @@ -0,0 +1,118 @@ +/* + Copyright 2020 The Compose Specification Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "fmt" + "strconv" + "strings" + + interp "github.com/compose-spec/compose-go/v2/interpolation" + "github.com/compose-spec/compose-go/v2/tree" + "github.com/sirupsen/logrus" +) + +var interpolateTypeCastMapping = map[tree.Path]interp.Cast{ + servicePath("cpu_count"): toInt64, + servicePath("cpu_percent"): toFloat, + servicePath("cpu_period"): toInt64, + servicePath("cpu_quota"): toInt64, + servicePath("cpu_rt_period"): toInt64, + servicePath("cpu_rt_runtime"): toInt64, + servicePath("cpus"): toFloat32, + servicePath("cpu_shares"): toInt64, + servicePath("init"): toBoolean, + servicePath("depends_on", tree.PathMatchAll, "required"): toBoolean, + servicePath("depends_on", tree.PathMatchAll, "restart"): toBoolean, + servicePath("deploy", "replicas"): toInt, + servicePath("deploy", "update_config", "parallelism"): toInt, + servicePath("deploy", "update_config", "max_failure_ratio"): toFloat, + servicePath("deploy", "rollback_config", "parallelism"): toInt, + servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat, + servicePath("deploy", "restart_policy", "max_attempts"): toInt, + servicePath("deploy", "placement", "max_replicas_per_node"): toInt, + servicePath("healthcheck", "retries"): toInt, + servicePath("healthcheck", "disable"): toBoolean, + servicePath("oom_kill_disable"): toBoolean, + servicePath("oom_score_adj"): toInt64, + servicePath("pids_limit"): toInt64, + 
servicePath("ports", tree.PathMatchList, "target"): toInt, + servicePath("privileged"): toBoolean, + servicePath("read_only"): toBoolean, + servicePath("scale"): toInt, + servicePath("stdin_open"): toBoolean, + servicePath("tty"): toBoolean, + servicePath("ulimits", tree.PathMatchAll): toInt, + servicePath("ulimits", tree.PathMatchAll, "hard"): toInt, + servicePath("ulimits", tree.PathMatchAll, "soft"): toInt, + servicePath("volumes", tree.PathMatchList, "read_only"): toBoolean, + servicePath("volumes", tree.PathMatchList, "volume", "nocopy"): toBoolean, + iPath("networks", tree.PathMatchAll, "external"): toBoolean, + iPath("networks", tree.PathMatchAll, "internal"): toBoolean, + iPath("networks", tree.PathMatchAll, "attachable"): toBoolean, + iPath("networks", tree.PathMatchAll, "enable_ipv4"): toBoolean, + iPath("networks", tree.PathMatchAll, "enable_ipv6"): toBoolean, + iPath("volumes", tree.PathMatchAll, "external"): toBoolean, + iPath("secrets", tree.PathMatchAll, "external"): toBoolean, + iPath("configs", tree.PathMatchAll, "external"): toBoolean, +} + +func iPath(parts ...string) tree.Path { + return tree.NewPath(parts...) +} + +func servicePath(parts ...string) tree.Path { + return iPath(append([]string{"services", tree.PathMatchAll}, parts...)...) 
+} + +func toInt(value string) (interface{}, error) { + return strconv.Atoi(value) +} + +func toInt64(value string) (interface{}, error) { + return strconv.ParseInt(value, 10, 64) +} + +func toFloat(value string) (interface{}, error) { + return strconv.ParseFloat(value, 64) +} + +func toFloat32(value string) (interface{}, error) { + f, err := strconv.ParseFloat(value, 32) + if err != nil { + return nil, err + } + return float32(f), nil +} + +// should match http://yaml.org/type/bool.html +func toBoolean(value string) (interface{}, error) { + switch strings.ToLower(value) { + case "true": + return true, nil + case "false": + return false, nil + case "y", "yes", "on": + logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `true`", value) + return true, nil + case "n", "no", "off": + logrus.Warnf("%q for boolean is not supported by YAML 1.2, please use `false`", value) + return false, nil + default: + return nil, fmt.Errorf("invalid boolean: %s", value) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go b/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go new file mode 100644 index 00000000..f73ad92e --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/loader.go @@ -0,0 +1,899 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "bytes" + "context" + "errors" + "fmt" + "io" + "os" + "path/filepath" + "reflect" + "regexp" + "slices" + "strconv" + "strings" + + "github.com/compose-spec/compose-go/v2/consts" + "github.com/compose-spec/compose-go/v2/errdefs" + interp "github.com/compose-spec/compose-go/v2/interpolation" + "github.com/compose-spec/compose-go/v2/override" + "github.com/compose-spec/compose-go/v2/paths" + "github.com/compose-spec/compose-go/v2/schema" + "github.com/compose-spec/compose-go/v2/template" + "github.com/compose-spec/compose-go/v2/transform" + "github.com/compose-spec/compose-go/v2/tree" + "github.com/compose-spec/compose-go/v2/types" + "github.com/compose-spec/compose-go/v2/validation" + "github.com/go-viper/mapstructure/v2" + "github.com/sirupsen/logrus" + "go.yaml.in/yaml/v4" +) + +// Options supported by Load +type Options struct { + // Skip schema validation + SkipValidation bool + // Skip interpolation + SkipInterpolation bool + // Skip normalization + SkipNormalization bool + // Resolve path + ResolvePaths bool + // Convert Windows path + ConvertWindowsPaths bool + // Skip consistency check + SkipConsistencyCheck bool + // Skip extends + SkipExtends bool + // SkipInclude will ignore `include` and only load model from file(s) set by ConfigDetails + SkipInclude bool + // SkipResolveEnvironment will ignore computing `environment` for services + SkipResolveEnvironment bool + // SkipDefaultValues will ignore missing required attributes + SkipDefaultValues bool + // Interpolation options + Interpolate *interp.Options + // Discard 'env_file' entries after resolving to 'environment' section + discardEnvFiles bool + // Set project projectName + projectName string + // Indicates when the projectName was imperatively set or guessed from path + projectNameImperativelySet bool + // Profiles set profiles to enable + Profiles []string + // ResourceLoaders manages support for remote resources + ResourceLoaders []ResourceLoader + // 
KnownExtensions manages x-* attribute we know and the corresponding go structs + KnownExtensions map[string]any + // Metada for telemetry + Listeners []Listener +} + +var versionWarning []string + +func (o *Options) warnObsoleteVersion(file string) { + if !slices.Contains(versionWarning, file) { + logrus.Warning(fmt.Sprintf("%s: the attribute `version` is obsolete, it will be ignored, please remove it to avoid potential confusion", file)) + } + versionWarning = append(versionWarning, file) +} + +type Listener = func(event string, metadata map[string]any) + +// Invoke all listeners for an event +func (o *Options) ProcessEvent(event string, metadata map[string]any) { + for _, l := range o.Listeners { + l(event, metadata) + } +} + +// ResourceLoader is a plugable remote resource resolver +type ResourceLoader interface { + // Accept returns `true` is the resource reference matches ResourceLoader supported protocol(s) + Accept(path string) bool + // Load returns the path to a local copy of remote resource identified by `path`. 
+ Load(ctx context.Context, path string) (string, error) + // Dir computes path to resource"s parent folder, made relative if possible + Dir(path string) string +} + +// RemoteResourceLoaders excludes localResourceLoader from ResourceLoaders +func (o Options) RemoteResourceLoaders() []ResourceLoader { + var loaders []ResourceLoader + for i, loader := range o.ResourceLoaders { + if _, ok := loader.(localResourceLoader); ok { + if i != len(o.ResourceLoaders)-1 { + logrus.Warning("misconfiguration of ResourceLoaders: localResourceLoader should be last") + } + continue + } + loaders = append(loaders, loader) + } + return loaders +} + +type localResourceLoader struct { + WorkingDir string +} + +func (l localResourceLoader) abs(p string) string { + if filepath.IsAbs(p) { + return p + } + return filepath.Join(l.WorkingDir, p) +} + +func (l localResourceLoader) Accept(_ string) bool { + // LocalResourceLoader is the last loader tested so it always should accept the config and try to get the content. 
+ return true +} + +func (l localResourceLoader) Load(_ context.Context, p string) (string, error) { + return l.abs(p), nil +} + +func (l localResourceLoader) Dir(originalPath string) string { + path := l.abs(originalPath) + if !l.isDir(path) { + path = l.abs(filepath.Dir(originalPath)) + } + rel, err := filepath.Rel(l.WorkingDir, path) + if err != nil { + return path + } + return rel +} + +func (l localResourceLoader) isDir(path string) bool { + fileInfo, err := os.Stat(path) + if err != nil { + return false + } + return fileInfo.IsDir() +} + +func (o *Options) clone() *Options { + return &Options{ + SkipValidation: o.SkipValidation, + SkipInterpolation: o.SkipInterpolation, + SkipNormalization: o.SkipNormalization, + ResolvePaths: o.ResolvePaths, + ConvertWindowsPaths: o.ConvertWindowsPaths, + SkipConsistencyCheck: o.SkipConsistencyCheck, + SkipExtends: o.SkipExtends, + SkipInclude: o.SkipInclude, + Interpolate: o.Interpolate, + discardEnvFiles: o.discardEnvFiles, + projectName: o.projectName, + projectNameImperativelySet: o.projectNameImperativelySet, + Profiles: o.Profiles, + ResourceLoaders: o.ResourceLoaders, + KnownExtensions: o.KnownExtensions, + Listeners: o.Listeners, + } +} + +func (o *Options) SetProjectName(name string, imperativelySet bool) { + o.projectName = name + o.projectNameImperativelySet = imperativelySet +} + +func (o Options) GetProjectName() (string, bool) { + return o.projectName, o.projectNameImperativelySet +} + +// serviceRef identifies a reference to a service. It's used to detect cyclic +// references in "extends". 
+type serviceRef struct { + filename string + service string +} + +type cycleTracker struct { + loaded []serviceRef +} + +func (ct *cycleTracker) Add(filename, service string) (*cycleTracker, error) { + toAdd := serviceRef{filename: filename, service: service} + for _, loaded := range ct.loaded { + if toAdd == loaded { + // Create an error message of the form: + // Circular reference: + // service-a in docker-compose.yml + // extends service-b in docker-compose.yml + // extends service-a in docker-compose.yml + errLines := []string{ + "Circular reference:", + fmt.Sprintf(" %s in %s", ct.loaded[0].service, ct.loaded[0].filename), + } + for _, service := range append(ct.loaded[1:], toAdd) { + errLines = append(errLines, fmt.Sprintf(" extends %s in %s", service.service, service.filename)) + } + + return nil, errors.New(strings.Join(errLines, "\n")) + } + } + + var branch []serviceRef + branch = append(branch, ct.loaded...) + branch = append(branch, toAdd) + return &cycleTracker{ + loaded: branch, + }, nil +} + +// WithDiscardEnvFiles sets the Options to discard the `env_file` section after resolving to +// the `environment` section +func WithDiscardEnvFiles(opts *Options) { + opts.discardEnvFiles = true +} + +// WithSkipValidation sets the Options to skip validation when loading sections +func WithSkipValidation(opts *Options) { + opts.SkipValidation = true +} + +// WithProfiles sets profiles to be activated +func WithProfiles(profiles []string) func(*Options) { + return func(opts *Options) { + opts.Profiles = profiles + } +} + +// PostProcessor is used to tweak compose model based on metadata extracted during yaml Unmarshal phase +// that hardly can be implemented using go-yaml and mapstructure +type PostProcessor interface { + // Apply changes to compose model based on recorder metadata + Apply(interface{}) error +} + +type NoopPostProcessor struct{} + +func (NoopPostProcessor) Apply(interface{}) error { return nil } + +// LoadConfigFiles ingests config files with 
ResourceLoader and returns config details with paths to local copies +func LoadConfigFiles(ctx context.Context, configFiles []string, workingDir string, options ...func(*Options)) (*types.ConfigDetails, error) { + if len(configFiles) < 1 { + return &types.ConfigDetails{}, fmt.Errorf("no configuration file provided: %w", errdefs.ErrNotFound) + } + + opts := &Options{} + config := &types.ConfigDetails{ + ConfigFiles: make([]types.ConfigFile, len(configFiles)), + } + + for _, op := range options { + op(opts) + } + opts.ResourceLoaders = append(opts.ResourceLoaders, localResourceLoader{}) + + for i, p := range configFiles { + if p == "-" { + config.ConfigFiles[i] = types.ConfigFile{ + Filename: p, + } + continue + } + + for _, loader := range opts.ResourceLoaders { + _, isLocalResourceLoader := loader.(localResourceLoader) + if !loader.Accept(p) { + continue + } + local, err := loader.Load(ctx, p) + if err != nil { + return nil, err + } + if config.WorkingDir == "" && !isLocalResourceLoader { + config.WorkingDir = filepath.Dir(local) + } + abs, err := filepath.Abs(local) + if err != nil { + abs = local + } + config.ConfigFiles[i] = types.ConfigFile{ + Filename: abs, + } + break + } + } + if config.WorkingDir == "" { + config.WorkingDir = workingDir + } + return config, nil +} + +// LoadWithContext reads a ConfigDetails and returns a fully loaded configuration as a compose-go Project +func LoadWithContext(ctx context.Context, configDetails types.ConfigDetails, options ...func(*Options)) (*types.Project, error) { + opts := ToOptions(&configDetails, options) + dict, err := loadModelWithContext(ctx, &configDetails, opts) + if err != nil { + return nil, err + } + return ModelToProject(dict, opts, configDetails) +} + +// LoadModelWithContext reads a ConfigDetails and returns a fully loaded configuration as a yaml dictionary +func LoadModelWithContext(ctx context.Context, configDetails types.ConfigDetails, options ...func(*Options)) (map[string]any, error) { + opts := 
ToOptions(&configDetails, options) + return loadModelWithContext(ctx, &configDetails, opts) +} + +// LoadModelWithContext reads a ConfigDetails and returns a fully loaded configuration as a yaml dictionary +func loadModelWithContext(ctx context.Context, configDetails *types.ConfigDetails, opts *Options) (map[string]any, error) { + if len(configDetails.ConfigFiles) < 1 { + return nil, errors.New("no compose file specified") + } + + err := projectName(configDetails, opts) + if err != nil { + return nil, err + } + + return load(ctx, *configDetails, opts, nil) +} + +func ToOptions(configDetails *types.ConfigDetails, options []func(*Options)) *Options { + opts := &Options{ + Interpolate: &interp.Options{ + Substitute: template.Substitute, + LookupValue: configDetails.LookupEnv, + TypeCastMapping: interpolateTypeCastMapping, + }, + ResolvePaths: true, + } + + for _, op := range options { + op(opts) + } + opts.ResourceLoaders = append(opts.ResourceLoaders, localResourceLoader{configDetails.WorkingDir}) + return opts +} + +func loadYamlModel(ctx context.Context, config types.ConfigDetails, opts *Options, ct *cycleTracker, included []string) (map[string]interface{}, error) { + var ( + dict = map[string]interface{}{} + err error + ) + workingDir, environment := config.WorkingDir, config.Environment + + for _, file := range config.ConfigFiles { + dict, _, err = loadYamlFile(ctx, file, opts, workingDir, environment, ct, dict, included) + if err != nil { + return nil, err + } + } + + if !opts.SkipDefaultValues { + dict, err = transform.SetDefaultValues(dict) + if err != nil { + return nil, err + } + } + + if !opts.SkipValidation { + if err := validation.Validate(dict); err != nil { + return nil, err + } + } + + if opts.ResolvePaths { + var remotes []paths.RemoteResource + for _, loader := range opts.RemoteResourceLoaders() { + remotes = append(remotes, loader.Accept) + } + err = paths.ResolveRelativePaths(dict, config.WorkingDir, remotes) + if err != nil { + return nil, err + } 
+ } + ResolveEnvironment(dict, config.Environment) + + return dict, nil +} + +func loadYamlFile(ctx context.Context, + file types.ConfigFile, + opts *Options, + workingDir string, + environment types.Mapping, + ct *cycleTracker, + dict map[string]interface{}, + included []string, +) (map[string]interface{}, PostProcessor, error) { + ctx = context.WithValue(ctx, consts.ComposeFileKey{}, file.Filename) + if file.Content == nil && file.Config == nil { + content, err := os.ReadFile(file.Filename) + if err != nil { + return nil, nil, err + } + file.Content = content + } + + processRawYaml := func(raw interface{}, processor PostProcessor) error { + converted, err := convertToStringKeysRecursive(raw, "") + if err != nil { + return err + } + cfg, ok := converted.(map[string]interface{}) + if !ok { + return errors.New("top-level object must be a mapping") + } + + if opts.Interpolate != nil && !opts.SkipInterpolation { + cfg, err = interp.Interpolate(cfg, *opts.Interpolate) + if err != nil { + return err + } + } + + fixEmptyNotNull(cfg) + + if !opts.SkipExtends { + err = ApplyExtends(ctx, cfg, opts, ct, processor) + if err != nil { + return err + } + } + + if err := processor.Apply(dict); err != nil { + return err + } + + if !opts.SkipInclude { + included = append(included, file.Filename) + err = ApplyInclude(ctx, workingDir, environment, cfg, opts, included, processor) + if err != nil { + return err + } + } + + dict, err = override.Merge(dict, cfg) + if err != nil { + return err + } + + dict, err = override.EnforceUnicity(dict) + if err != nil { + return err + } + + if !opts.SkipValidation { + if err := schema.Validate(dict); err != nil { + return fmt.Errorf("validating %s: %w", file.Filename, err) + } + if _, ok := dict["version"]; ok { + opts.warnObsoleteVersion(file.Filename) + delete(dict, "version") + } + } + + dict, err = transform.Canonical(dict, opts.SkipInterpolation) + if err != nil { + return err + } + + dict = OmitEmpty(dict) + + // Canonical transformation can 
reveal duplicates, typically as ports can be a range and conflict with an override + dict, err = override.EnforceUnicity(dict) + return err + } + + var processor PostProcessor + if file.Config == nil { + r := bytes.NewReader(file.Content) + decoder := yaml.NewDecoder(r) + for { + var raw interface{} + reset := &ResetProcessor{target: &raw} + err := decoder.Decode(reset) + if err != nil && errors.Is(err, io.EOF) { + break + } + if err != nil { + return nil, nil, fmt.Errorf("failed to parse %s: %w", file.Filename, err) + } + processor = reset + if err := processRawYaml(raw, processor); err != nil { + return nil, nil, err + } + } + } else { + if err := processRawYaml(file.Config, NoopPostProcessor{}); err != nil { + return nil, nil, err + } + } + return dict, processor, nil +} + +func load(ctx context.Context, configDetails types.ConfigDetails, opts *Options, loaded []string) (map[string]interface{}, error) { + mainFile := configDetails.ConfigFiles[0].Filename + for _, f := range loaded { + if f == mainFile { + loaded = append(loaded, mainFile) + return nil, fmt.Errorf("include cycle detected:\n%s\n include %s", loaded[0], strings.Join(loaded[1:], "\n include ")) + } + } + + dict, err := loadYamlModel(ctx, configDetails, opts, &cycleTracker{}, nil) + if err != nil { + return nil, err + } + + if len(dict) == 0 { + return nil, errors.New("empty compose file") + } + + if !opts.SkipValidation && opts.projectName == "" { + return nil, errors.New("project name must not be empty") + } + + if !opts.SkipNormalization { + dict["name"] = opts.projectName + dict, err = Normalize(dict, configDetails.Environment) + if err != nil { + return nil, err + } + } + + return dict, nil +} + +// ModelToProject binds a canonical yaml dict into compose-go structs +func ModelToProject(dict map[string]interface{}, opts *Options, configDetails types.ConfigDetails) (*types.Project, error) { + project := &types.Project{ + Name: opts.projectName, + WorkingDir: configDetails.WorkingDir, + 
Environment: configDetails.Environment, + } + delete(dict, "name") // project name set by yaml must be identified by caller as opts.projectName + + var err error + dict, err = processExtensions(dict, tree.NewPath(), opts.KnownExtensions) + if err != nil { + return nil, err + } + + err = Transform(dict, project) + if err != nil { + return nil, err + } + + if opts.ConvertWindowsPaths { + for i, service := range project.Services { + for j, volume := range service.Volumes { + service.Volumes[j] = convertVolumePath(volume) + } + project.Services[i] = service + } + } + + if project, err = project.WithProfiles(opts.Profiles); err != nil { + return nil, err + } + + if !opts.SkipConsistencyCheck { + err := checkConsistency(project) + if err != nil { + return nil, err + } + } + + if !opts.SkipResolveEnvironment { + project, err = project.WithServicesEnvironmentResolved(opts.discardEnvFiles) + if err != nil { + return nil, err + } + } + + project, err = project.WithServicesLabelsResolved(opts.discardEnvFiles) + if err != nil { + return nil, err + } + + return project, nil +} + +func InvalidProjectNameErr(v string) error { + return fmt.Errorf( + "invalid project name %q: must consist only of lowercase alphanumeric characters, hyphens, and underscores as well as start with a letter or number", + v, + ) +} + +// projectName determines the canonical name to use for the project considering +// the loader Options as well as `name` fields in Compose YAML fields (which +// also support interpolation). 
+func projectName(details *types.ConfigDetails, opts *Options) error { + defer func() { + if details.Environment == nil { + details.Environment = map[string]string{} + } + details.Environment[consts.ComposeProjectName] = opts.projectName + }() + + if opts.projectNameImperativelySet { + if NormalizeProjectName(opts.projectName) != opts.projectName { + return InvalidProjectNameErr(opts.projectName) + } + return nil + } + + type named struct { + Name string `yaml:"name"` + } + + // if user did NOT provide a name explicitly, then see if one is defined + // in any of the config files + var pjNameFromConfigFile string + for _, configFile := range details.ConfigFiles { + content := configFile.Content + if content == nil { + // This can be hit when Filename is set but Content is not. One + // example is when using ToConfigFiles(). + d, err := os.ReadFile(configFile.Filename) + if err != nil { + return fmt.Errorf("failed to read file %q: %w", configFile.Filename, err) + } + content = d + configFile.Content = d + } + var n named + r := bytes.NewReader(content) + decoder := yaml.NewDecoder(r) + for { + err := decoder.Decode(&n) + if err != nil && errors.Is(err, io.EOF) { + break + } + if err != nil { + // HACK: the way that loading is currently structured, this is + // a duplicative parse just for the `name`. 
if it fails, we + // give up but don't return the error, knowing that it'll get + // caught downstream for us + break + } + if n.Name != "" { + pjNameFromConfigFile = n.Name + } + } + } + if !opts.SkipInterpolation { + interpolated, err := interp.Interpolate( + map[string]interface{}{"name": pjNameFromConfigFile}, + *opts.Interpolate, + ) + if err != nil { + return err + } + pjNameFromConfigFile = interpolated["name"].(string) + } + + if !opts.SkipNormalization { + pjNameFromConfigFile = NormalizeProjectName(pjNameFromConfigFile) + } + if pjNameFromConfigFile != "" { + opts.projectName = pjNameFromConfigFile + } + return nil +} + +func NormalizeProjectName(s string) string { + r := regexp.MustCompile("[a-z0-9_-]") + s = strings.ToLower(s) + s = strings.Join(r.FindAllString(s, -1), "") + return strings.TrimLeft(s, "_-") +} + +var userDefinedKeys = []tree.Path{ + "services", + "services.*.depends_on", + "volumes", + "networks", + "secrets", + "configs", +} + +func processExtensions(dict map[string]any, p tree.Path, extensions map[string]any) (map[string]interface{}, error) { + extras := map[string]any{} + var err error + for key, value := range dict { + skip := false + for _, uk := range userDefinedKeys { + if p.Matches(uk) { + skip = true + break + } + } + if !skip && strings.HasPrefix(key, "x-") { + extras[key] = value + delete(dict, key) + continue + } + switch v := value.(type) { + case map[string]interface{}: + dict[key], err = processExtensions(v, p.Next(key), extensions) + if err != nil { + return nil, err + } + case []interface{}: + for i, e := range v { + if m, ok := e.(map[string]interface{}); ok { + v[i], err = processExtensions(m, p.Next(strconv.Itoa(i)), extensions) + if err != nil { + return nil, err + } + } + } + } + } + for name, val := range extras { + if typ, ok := extensions[name]; ok { + target := reflect.New(reflect.TypeOf(typ)).Elem().Interface() + err = Transform(val, &target) + if err != nil { + return nil, err + } + extras[name] = target + } 
+ } + if len(extras) > 0 { + dict[consts.Extensions] = extras + } + return dict, nil +} + +// Transform converts the source into the target struct with compose types transformer +// and the specified transformers if any. +func Transform(source interface{}, target interface{}) error { + data := mapstructure.Metadata{} + config := &mapstructure.DecoderConfig{ + DecodeHook: mapstructure.ComposeDecodeHookFunc( + nameServices, + decoderHook, + cast, + secretConfigDecoderHook, + ), + Result: target, + TagName: "yaml", + Metadata: &data, + } + decoder, err := mapstructure.NewDecoder(config) + if err != nil { + return err + } + return decoder.Decode(source) +} + +// nameServices create implicit `name` key for convenience accessing service +func nameServices(from reflect.Value, to reflect.Value) (interface{}, error) { + if to.Type() == reflect.TypeOf(types.Services{}) { + nameK := reflect.ValueOf("name") + iter := from.MapRange() + for iter.Next() { + name := iter.Key() + elem := iter.Value() + elem.Elem().SetMapIndex(nameK, name) + } + } + return from.Interface(), nil +} + +func secretConfigDecoderHook(from, to reflect.Type, data interface{}) (interface{}, error) { + // Check if the input is a map and we're decoding into a SecretConfig + if from.Kind() == reflect.Map && to == reflect.TypeOf(types.SecretConfig{}) { + if v, ok := data.(map[string]interface{}); ok { + if ext, ok := v[consts.Extensions].(map[string]interface{}); ok { + if val, ok := ext[types.SecretConfigXValue].(string); ok { + // Return a map with the Content field populated + v["Content"] = val + delete(ext, types.SecretConfigXValue) + + if len(ext) == 0 { + delete(v, consts.Extensions) + } + } + } + } + } + + // Return the original data so the rest is handled by default mapstructure logic + return data, nil +} + +// keys need to be converted to strings for jsonschema +func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) { + if mapping, ok := 
value.(map[string]interface{}); ok { + for key, entry := range mapping { + var newKeyPrefix string + if keyPrefix == "" { + newKeyPrefix = key + } else { + newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, key) + } + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + mapping[key] = convertedEntry + } + return mapping, nil + } + if mapping, ok := value.(map[interface{}]interface{}); ok { + dict := make(map[string]interface{}) + for key, entry := range mapping { + str, ok := key.(string) + if !ok { + return nil, formatInvalidKeyError(keyPrefix, key) + } + var newKeyPrefix string + if keyPrefix == "" { + newKeyPrefix = str + } else { + newKeyPrefix = fmt.Sprintf("%s.%s", keyPrefix, str) + } + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + dict[str] = convertedEntry + } + return dict, nil + } + if list, ok := value.([]interface{}); ok { + var convertedList []interface{} + for index, entry := range list { + newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index) + convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix) + if err != nil { + return nil, err + } + convertedList = append(convertedList, convertedEntry) + } + return convertedList, nil + } + return value, nil +} + +func formatInvalidKeyError(keyPrefix string, key interface{}) error { + var location string + if keyPrefix == "" { + location = "at top level" + } else { + location = fmt.Sprintf("in %s", keyPrefix) + } + return fmt.Errorf("non-string key %s: %#v", location, key) +} + +// Windows path, c:\\my\\path\\shiny, need to be changed to be compatible with +// the Engine. 
Volume path are expected to be linux style /c/my/path/shiny/ +func convertVolumePath(volume types.ServiceVolumeConfig) types.ServiceVolumeConfig { + volumeName := strings.ToLower(filepath.VolumeName(volume.Source)) + if len(volumeName) != 2 { + return volume + } + + convertedSource := fmt.Sprintf("/%c%s", volumeName[0], volume.Source[len(volumeName):]) + convertedSource = strings.ReplaceAll(convertedSource, "\\", "/") + + volume.Source = convertedSource + return volume +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go b/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go new file mode 100644 index 00000000..e5b902ab --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/mapstructure.go @@ -0,0 +1,79 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "reflect" + "strconv" +) + +// comparable to yaml.Unmarshaler, decoder allow a type to define it's own custom logic to convert value +// see https://github.com/mitchellh/mapstructure/pull/294 +type decoder interface { + DecodeMapstructure(interface{}) error +} + +// see https://github.com/mitchellh/mapstructure/issues/115#issuecomment-735287466 +// adapted to support types derived from built-in types, as DecodeMapstructure would not be able to mutate internal +// value, so need to invoke DecodeMapstructure defined by pointer to type +func decoderHook(from reflect.Value, to reflect.Value) (interface{}, error) { + // If the destination implements the decoder interface + u, ok := to.Interface().(decoder) + if !ok { + // for non-struct types we need to invoke func (*type) DecodeMapstructure() + if to.CanAddr() { + pto := to.Addr() + u, ok = pto.Interface().(decoder) + } + if !ok { + return from.Interface(), nil + } + } + // If it is nil and a pointer, create and assign the target value first + if to.Type().Kind() == reflect.Ptr && to.IsNil() { + to.Set(reflect.New(to.Type().Elem())) + u = to.Interface().(decoder) + } + // Call the custom DecodeMapstructure method + if err := u.DecodeMapstructure(from.Interface()); err != nil { + return to.Interface(), err + } + return to.Interface(), nil +} + +func cast(from reflect.Value, to reflect.Value) (interface{}, error) { + switch from.Type().Kind() { + case reflect.String: + switch to.Kind() { + case reflect.Bool: + return toBoolean(from.String()) + case reflect.Int: + return toInt(from.String()) + case reflect.Int64: + return toInt64(from.String()) + case reflect.Float32: + return toFloat32(from.String()) + case reflect.Float64: + return toFloat(from.String()) + } + case reflect.Int: + if to.Kind() == reflect.String { + return strconv.FormatInt(from.Int(), 10), nil + } + } + return from.Interface(), nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go 
b/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go new file mode 100644 index 00000000..7b1c4941 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/normalize.go @@ -0,0 +1,266 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package loader + +import ( + "fmt" + "path" + "strconv" + "strings" + + "github.com/compose-spec/compose-go/v2/types" +) + +// Normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults +func Normalize(dict map[string]any, env types.Mapping) (map[string]any, error) { + normalizeNetworks(dict) + + if d, ok := dict["services"]; ok { + services := d.(map[string]any) + for name, s := range services { + service := s.(map[string]any) + + if service["pull_policy"] == types.PullPolicyIfNotPresent { + service["pull_policy"] = types.PullPolicyMissing + } + + fn := func(s string) (string, bool) { + v, ok := env[s] + return v, ok + } + + if b, ok := service["build"]; ok { + build := b.(map[string]any) + if build["context"] == nil { + build["context"] = "." 
+ } + if build["dockerfile"] == nil && build["dockerfile_inline"] == nil { + build["dockerfile"] = "Dockerfile" + } + + if a, ok := build["args"]; ok { + build["args"], _ = resolve(a, fn, false) + } + + service["build"] = build + } + + if e, ok := service["environment"]; ok { + service["environment"], _ = resolve(e, fn, true) + } + + var dependsOn map[string]any + if d, ok := service["depends_on"]; ok { + dependsOn = d.(map[string]any) + } else { + dependsOn = map[string]any{} + } + if l, ok := service["links"]; ok { + links := l.([]any) + for _, e := range links { + link := e.(string) + parts := strings.Split(link, ":") + if len(parts) == 2 { + link = parts[0] + } + if _, ok := dependsOn[link]; !ok { + dependsOn[link] = map[string]any{ + "condition": types.ServiceConditionStarted, + "restart": true, + "required": true, + } + } + } + } + + for _, namespace := range []string{"network_mode", "ipc", "pid", "uts", "cgroup"} { + if n, ok := service[namespace]; ok { + ref := n.(string) + if strings.HasPrefix(ref, types.ServicePrefix) { + shared := ref[len(types.ServicePrefix):] + if _, ok := dependsOn[shared]; !ok { + dependsOn[shared] = map[string]any{ + "condition": types.ServiceConditionStarted, + "restart": true, + "required": true, + } + } + } + } + } + + if v, ok := service["volumes"]; ok { + volumes := v.([]any) + for i, volume := range volumes { + vol := volume.(map[string]any) + target := vol["target"].(string) + vol["target"] = path.Clean(target) + volumes[i] = vol + } + service["volumes"] = volumes + } + + if n, ok := service["volumes_from"]; ok { + volumesFrom := n.([]any) + for _, v := range volumesFrom { + vol := v.(string) + if !strings.HasPrefix(vol, types.ContainerPrefix) { + spec := strings.Split(vol, ":") + if _, ok := dependsOn[spec[0]]; !ok { + dependsOn[spec[0]] = map[string]any{ + "condition": types.ServiceConditionStarted, + "restart": false, + "required": true, + } + } + } + } + } + if len(dependsOn) > 0 { + service["depends_on"] = dependsOn + } 
+ services[name] = service + } + + dict["services"] = services + } + setNameFromKey(dict) + + return dict, nil +} + +func normalizeNetworks(dict map[string]any) { + var networks map[string]any + if n, ok := dict["networks"]; ok { + networks = n.(map[string]any) + } else { + networks = map[string]any{} + } + + // implicit `default` network must be introduced only if actually used by some service + usesDefaultNetwork := false + + if s, ok := dict["services"]; ok { + services := s.(map[string]any) + for name, se := range services { + service := se.(map[string]any) + if _, ok := service["provider"]; ok { + continue + } + if _, ok := service["network_mode"]; ok { + continue + } + if n, ok := service["networks"]; !ok { + // If none explicitly declared, service is connected to default network + service["networks"] = map[string]any{"default": nil} + usesDefaultNetwork = true + } else { + net := n.(map[string]any) + if len(net) == 0 { + // networks section declared but empty (corner case) + service["networks"] = map[string]any{"default": nil} + usesDefaultNetwork = true + } else if _, ok := net["default"]; ok { + usesDefaultNetwork = true + } + } + services[name] = service + } + dict["services"] = services + } + + if _, ok := networks["default"]; !ok && usesDefaultNetwork { + // If not declared explicitly, Compose model involves an implicit "default" network + networks["default"] = nil + } + + if len(networks) > 0 { + dict["networks"] = networks + } +} + +func resolve(a any, fn func(s string) (string, bool), keepEmpty bool) (any, bool) { + switch v := a.(type) { + case []any: + var resolved []any + for _, val := range v { + if r, ok := resolve(val, fn, keepEmpty); ok { + resolved = append(resolved, r) + } + } + return resolved, true + case map[string]any: + resolved := map[string]any{} + for key, val := range v { + if val != nil { + resolved[key] = val + continue + } + if s, ok := fn(key); ok { + resolved[key] = s + } else if keepEmpty { + resolved[key] = nil + } + } + 
return resolved, true + case string: + if !strings.Contains(v, "=") { + if val, ok := fn(v); ok { + return fmt.Sprintf("%s=%s", v, val), true + } + if keepEmpty { + return v, true + } + return "", false + } + return v, true + default: + return v, false + } +} + +// Resources with no explicit name are actually named by their key in map +func setNameFromKey(dict map[string]any) { + for _, r := range []string{"networks", "volumes", "configs", "secrets"} { + a, ok := dict[r] + if !ok { + continue + } + toplevel := a.(map[string]any) + for key, r := range toplevel { + var resource map[string]any + if r != nil { + resource = r.(map[string]any) + } else { + resource = map[string]any{} + } + if resource["name"] == nil { + if x, ok := resource["external"]; ok && isTrue(x) { + resource["name"] = key + } else { + resource["name"] = fmt.Sprintf("%s_%s", dict["name"], key) + } + } + toplevel[key] = resource + } + } +} + +func isTrue(x any) bool { + parseBool, _ := strconv.ParseBool(fmt.Sprint(x)) + return parseBool +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/omitEmpty.go b/vendor/github.com/compose-spec/compose-go/v2/loader/omitEmpty.go new file mode 100644 index 00000000..eef6be8c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/omitEmpty.go @@ -0,0 +1,75 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import "github.com/compose-spec/compose-go/v2/tree" + +var omitempty = []tree.Path{ + "services.*.dns", +} + +// OmitEmpty removes empty attributes which are irrelevant when unset +func OmitEmpty(yaml map[string]any) map[string]any { + cleaned := omitEmpty(yaml, tree.NewPath()) + return cleaned.(map[string]any) +} + +func omitEmpty(data any, p tree.Path) any { + switch v := data.(type) { + case map[string]any: + for k, e := range v { + if isEmpty(e) && mustOmit(p) { + delete(v, k) + continue + } + + v[k] = omitEmpty(e, p.Next(k)) + } + return v + case []any: + var c []any + for _, e := range v { + if isEmpty(e) && mustOmit(p) { + continue + } + + c = append(c, omitEmpty(e, p.Next("[]"))) + } + return c + default: + return data + } +} + +func mustOmit(p tree.Path) bool { + for _, pattern := range omitempty { + if p.Matches(pattern) { + return true + } + } + return false +} + +func isEmpty(e any) bool { + if e == nil { + return true + } + if v, ok := e.(string); ok && v == "" { + return true + } + return false +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go b/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go new file mode 100644 index 00000000..c03126a8 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/paths.go @@ -0,0 +1,50 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "path/filepath" + + "github.com/compose-spec/compose-go/v2/types" +) + +// ResolveRelativePaths resolves relative paths based on project WorkingDirectory +func ResolveRelativePaths(project *types.Project) error { + absWorkingDir, err := filepath.Abs(project.WorkingDir) + if err != nil { + return err + } + project.WorkingDir = absWorkingDir + + absComposeFiles, err := absComposeFiles(project.ComposeFiles) + if err != nil { + return err + } + project.ComposeFiles = absComposeFiles + return nil +} + +func absComposeFiles(composeFiles []string) ([]string, error) { + for i, composeFile := range composeFiles { + absComposefile, err := filepath.Abs(composeFile) + if err != nil { + return nil, err + } + composeFiles[i] = absComposefile + } + return composeFiles, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go b/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go new file mode 100644 index 00000000..ed1fc0c3 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/reset.go @@ -0,0 +1,196 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "fmt" + "strconv" + "strings" + + "github.com/compose-spec/compose-go/v2/tree" + "go.yaml.in/yaml/v4" +) + +type ResetProcessor struct { + target interface{} + paths []tree.Path + visitedNodes map[*yaml.Node][]string +} + +// UnmarshalYAML implement yaml.Unmarshaler +func (p *ResetProcessor) UnmarshalYAML(value *yaml.Node) error { + p.visitedNodes = make(map[*yaml.Node][]string) + resolved, err := p.resolveReset(value, tree.NewPath()) + p.visitedNodes = nil + if err != nil { + return err + } + return resolved.Decode(p.target) +} + +// resolveReset detects `!reset` tag being set on yaml nodes and record position in the yaml tree +func (p *ResetProcessor) resolveReset(node *yaml.Node, path tree.Path) (*yaml.Node, error) { + pathStr := path.String() + // If the path contains "<<", removing the "<<" element and merging the path + if strings.Contains(pathStr, ".<<") { + path = tree.NewPath(strings.Replace(pathStr, ".<<", "", 1)) + } + + // If the node is an alias, We need to process the alias field in order to consider the !override and !reset tags + if node.Kind == yaml.AliasNode { + if err := p.checkForCycle(node.Alias, path); err != nil { + return nil, err + } + + return p.resolveReset(node.Alias, path) + } + + if node.Tag == "!reset" { + p.paths = append(p.paths, path) + return nil, nil + } + if node.Tag == "!override" { + p.paths = append(p.paths, path) + return node, nil + } + + keys := map[string]int{} + switch node.Kind { + case yaml.SequenceNode: + var nodes []*yaml.Node + for idx, v := range node.Content { + next := path.Next(strconv.Itoa(idx)) + resolved, err := p.resolveReset(v, next) + if err != nil { + return nil, err + } + if resolved != nil { + nodes = append(nodes, resolved) + } + } + node.Content = nodes + case yaml.MappingNode: + var key string + var nodes []*yaml.Node + for idx, v := range node.Content { + if idx%2 == 0 { + key = v.Value + if line, seen := keys[key]; seen { + return nil, fmt.Errorf("line %d: 
mapping key %#v already defined at line %d", v.Line, key, line) + } + keys[key] = v.Line + } else { + resolved, err := p.resolveReset(v, path.Next(key)) + if err != nil { + return nil, err + } + if resolved != nil { + nodes = append(nodes, node.Content[idx-1], resolved) + } + } + } + node.Content = nodes + } + return node, nil +} + +// Apply finds the go attributes matching recorded paths and reset them to zero value +func (p *ResetProcessor) Apply(target any) error { + return p.applyNullOverrides(target, tree.NewPath()) +} + +// applyNullOverrides set val to Zero if it matches any of the recorded paths +func (p *ResetProcessor) applyNullOverrides(target any, path tree.Path) error { + switch v := target.(type) { + case map[string]any: + KEYS: + for k, e := range v { + next := path.Next(k) + for _, pattern := range p.paths { + if next.Matches(pattern) { + delete(v, k) + continue KEYS + } + } + err := p.applyNullOverrides(e, next) + if err != nil { + return err + } + } + case []any: + ITER: + for i, e := range v { + next := path.Next(fmt.Sprintf("[%d]", i)) + for _, pattern := range p.paths { + if next.Matches(pattern) { + continue ITER + // TODO(ndeloof) support removal from sequence + } + } + err := p.applyNullOverrides(e, next) + if err != nil { + return err + } + } + } + return nil +} + +func (p *ResetProcessor) checkForCycle(node *yaml.Node, path tree.Path) error { + paths := p.visitedNodes[node] + pathStr := path.String() + + for _, prevPath := range paths { + // If we're visiting the exact same path, it's not a cycle + if pathStr == prevPath { + continue + } + + // If either path is using a merge key, it's legitimate YAML merging + if strings.Contains(prevPath, "<<") || strings.Contains(pathStr, "<<") { + continue + } + + // Only consider it a cycle if one path is contained within the other + // and they're not in different service definitions + if (strings.HasPrefix(pathStr, prevPath+".") || + strings.HasPrefix(prevPath, pathStr+".")) && + 
!areInDifferentServices(pathStr, prevPath) { + return fmt.Errorf("cycle detected: node at path %s references node at path %s", pathStr, prevPath) + } + } + + p.visitedNodes[node] = append(paths, pathStr) + return nil +} + +// areInDifferentServices checks if two paths are in different service definitions +func areInDifferentServices(path1, path2 string) bool { + // Split paths into components + parts1 := strings.Split(path1, ".") + parts2 := strings.Split(path2, ".") + + // Look for the services component and compare the service names + for i := 0; i < len(parts1) && i < len(parts2); i++ { + if parts1[i] == "services" && i+1 < len(parts1) && + parts2[i] == "services" && i+1 < len(parts2) { + // If they're different services, it's not a cycle + return parts1[i+1] != parts2[i+1] + } + } + return false +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go b/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go new file mode 100644 index 00000000..85131947 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/loader/validate.go @@ -0,0 +1,218 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package loader + +import ( + "errors" + "fmt" + "strings" + + "github.com/compose-spec/compose-go/v2/errdefs" + "github.com/compose-spec/compose-go/v2/graph" + "github.com/compose-spec/compose-go/v2/types" +) + +// checkConsistency validate a compose model is consistent +func checkConsistency(project *types.Project) error { //nolint:gocyclo + for name, s := range project.Services { + if s.Build == nil && s.Image == "" && s.Provider == nil { + return fmt.Errorf("service %q has neither an image nor a build context specified: %w", s.Name, errdefs.ErrInvalid) + } + + if s.Build != nil { + if s.Build.DockerfileInline != "" && s.Build.Dockerfile != "" { + return fmt.Errorf("service %q declares mutualy exclusive dockerfile and dockerfile_inline: %w", s.Name, errdefs.ErrInvalid) + } + + for add, c := range s.Build.AdditionalContexts { + if target, ok := strings.CutPrefix(c, types.ServicePrefix); ok { + t, err := project.GetService(target) + if err != nil { + return fmt.Errorf("service %q declares unknown service %q as additional contexts %s", name, target, add) + } + if t.Build == nil { + return fmt.Errorf("service %q declares non-buildable service %q as additional contexts %s", name, target, add) + } + } + } + + if len(s.Build.Platforms) > 0 && s.Platform != "" { + var found bool + for _, platform := range s.Build.Platforms { + if platform == s.Platform { + found = true + break + } + } + if !found { + return fmt.Errorf("service.build.platforms MUST include service.platform %q: %w", s.Platform, errdefs.ErrInvalid) + } + } + } + + if s.NetworkMode != "" && len(s.Networks) > 0 { + return fmt.Errorf("service %s declares mutually exclusive `network_mode` and `networks`: %w", s.Name, errdefs.ErrInvalid) + } + for network := range s.Networks { + if _, ok := project.Networks[network]; !ok { + return fmt.Errorf("service %q refers to undefined network %s: %w", s.Name, network, errdefs.ErrInvalid) + } + } + + if s.HealthCheck != nil && len(s.HealthCheck.Test) > 0 { + switch 
s.HealthCheck.Test[0] { + case "CMD", "CMD-SHELL", "NONE": + default: + return errors.New(`healthcheck.test must start either by "CMD", "CMD-SHELL" or "NONE"`) + } + } + + for dependedService, cfg := range s.DependsOn { + if _, err := project.GetService(dependedService); err != nil { + if errors.Is(err, errdefs.ErrDisabled) && !cfg.Required { + continue + } + return fmt.Errorf("service %q depends on undefined service %q: %w", s.Name, dependedService, errdefs.ErrInvalid) + } + } + + if strings.HasPrefix(s.NetworkMode, types.ServicePrefix) { + serviceName := s.NetworkMode[len(types.ServicePrefix):] + if _, err := project.GetServices(serviceName); err != nil { + return fmt.Errorf("service %q not found for network_mode 'service:%s'", serviceName, serviceName) + } + } + + for _, volume := range s.Volumes { + if volume.Type == types.VolumeTypeVolume && volume.Source != "" { // non anonymous volumes + if _, ok := project.Volumes[volume.Source]; !ok { + return fmt.Errorf("service %q refers to undefined volume %s: %w", s.Name, volume.Source, errdefs.ErrInvalid) + } + } + } + if s.Build != nil { + for _, secret := range s.Build.Secrets { + if _, ok := project.Secrets[secret.Source]; !ok { + return fmt.Errorf("service %q refers to undefined build secret %s: %w", s.Name, secret.Source, errdefs.ErrInvalid) + } + } + } + for _, config := range s.Configs { + if _, ok := project.Configs[config.Source]; !ok { + return fmt.Errorf("service %q refers to undefined config %s: %w", s.Name, config.Source, errdefs.ErrInvalid) + } + } + + for model := range s.Models { + if _, ok := project.Models[model]; !ok { + return fmt.Errorf("service %q refers to undefined model %s: %w", s.Name, model, errdefs.ErrInvalid) + } + } + + for _, secret := range s.Secrets { + if _, ok := project.Secrets[secret.Source]; !ok { + return fmt.Errorf("service %q refers to undefined secret %s: %w", s.Name, secret.Source, errdefs.ErrInvalid) + } + } + + if s.Scale != nil && s.Deploy != nil { + if s.Deploy.Replicas 
!= nil && *s.Scale != *s.Deploy.Replicas { + return fmt.Errorf("services.%s: can't set distinct values on 'scale' and 'deploy.replicas': %w", + s.Name, errdefs.ErrInvalid) + } + s.Deploy.Replicas = s.Scale + } + + if s.Scale != nil && *s.Scale < 0 { + return fmt.Errorf("services.%s.scale: must be greater than or equal to 0", s.Name) + } + if s.Deploy != nil && s.Deploy.Replicas != nil && *s.Deploy.Replicas < 0 { + return fmt.Errorf("services.%s.deploy.replicas: must be greater than or equal to 0", s.Name) + } + + if s.CPUS != 0 && s.Deploy != nil { + if s.Deploy.Resources.Limits != nil && s.Deploy.Resources.Limits.NanoCPUs.Value() != s.CPUS { + return fmt.Errorf("services.%s: can't set distinct values on 'cpus' and 'deploy.resources.limits.cpus': %w", + s.Name, errdefs.ErrInvalid) + } + } + if s.MemLimit != 0 && s.Deploy != nil { + if s.Deploy.Resources.Limits != nil && s.Deploy.Resources.Limits.MemoryBytes != s.MemLimit { + return fmt.Errorf("services.%s: can't set distinct values on 'mem_limit' and 'deploy.resources.limits.memory': %w", + s.Name, errdefs.ErrInvalid) + } + } + if s.MemReservation != 0 && s.Deploy != nil { + if s.Deploy.Resources.Reservations != nil && s.Deploy.Resources.Reservations.MemoryBytes != s.MemReservation { + return fmt.Errorf("services.%s: can't set distinct values on 'mem_reservation' and 'deploy.resources.reservations.memory': %w", + s.Name, errdefs.ErrInvalid) + } + } + if s.PidsLimit != 0 && s.Deploy != nil { + if s.Deploy.Resources.Limits != nil && s.Deploy.Resources.Limits.Pids != s.PidsLimit { + return fmt.Errorf("services.%s: can't set distinct values on 'pids_limit' and 'deploy.resources.limits.pids': %w", + s.Name, errdefs.ErrInvalid) + } + } + + if s.GetScale() > 1 && s.ContainerName != "" { + attr := "scale" + if s.Scale == nil { + attr = "deploy.replicas" + } + return fmt.Errorf("services.%s: can't set container_name and %s as container name must be unique: %w", attr, + s.Name, errdefs.ErrInvalid) + } + + if s.Develop != nil 
&& s.Develop.Watch != nil { + for _, watch := range s.Develop.Watch { + if watch.Target == "" && watch.Action != types.WatchActionRebuild && watch.Action != types.WatchActionRestart { + return fmt.Errorf("services.%s.develop.watch: target is required for non-rebuild actions: %w", s.Name, errdefs.ErrInvalid) + } + } + } + + mounts := map[string]string{} + for i, tmpfs := range s.Tmpfs { + loc := fmt.Sprintf("services.%s.tmpfs[%d]", s.Name, i) + path, _, _ := strings.Cut(tmpfs, ":") + if p, ok := mounts[path]; ok { + return fmt.Errorf("%s: target %s already mounted as %s", loc, path, p) + } + mounts[path] = loc + } + for i, volume := range s.Volumes { + loc := fmt.Sprintf("services.%s.volumes[%d]", s.Name, i) + if p, ok := mounts[volume.Target]; ok { + return fmt.Errorf("%s: target %s already mounted as %s", loc, volume.Target, p) + } + mounts[volume.Target] = loc + } + + } + + for name, secret := range project.Secrets { + if secret.External { + continue + } + if secret.File == "" && secret.Environment == "" { + return fmt.Errorf("secret %q must declare either `file` or `environment`: %w", name, errdefs.ErrInvalid) + } + } + + return graph.CheckCycle(project) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/override/extends.go b/vendor/github.com/compose-spec/compose-go/v2/override/extends.go new file mode 100644 index 00000000..de92fd29 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/override/extends.go @@ -0,0 +1,27 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package override + +import "github.com/compose-spec/compose-go/v2/tree" + +func ExtendService(base, override map[string]any) (map[string]any, error) { + yaml, err := MergeYaml(base, override, tree.NewPath("services.x")) + if err != nil { + return nil, err + } + return yaml.(map[string]any), nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/override/merge.go b/vendor/github.com/compose-spec/compose-go/v2/override/merge.go new file mode 100644 index 00000000..525299cd --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/override/merge.go @@ -0,0 +1,307 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package override + +import ( + "cmp" + "fmt" + "slices" + + "github.com/compose-spec/compose-go/v2/tree" +) + +// Merge applies overrides to a config model +func Merge(right, left map[string]any) (map[string]any, error) { + merged, err := MergeYaml(right, left, tree.NewPath()) + if err != nil { + return nil, err + } + return merged.(map[string]any), nil +} + +type merger func(any, any, tree.Path) (any, error) + +// mergeSpecials defines the custom rules applied by compose when merging yaml trees +var mergeSpecials = map[tree.Path]merger{} + +func init() { + mergeSpecials["networks.*.ipam.config"] = mergeIPAMConfig + mergeSpecials["networks.*.labels"] = mergeToSequence + mergeSpecials["volumes.*.labels"] = mergeToSequence + mergeSpecials["services.*.annotations"] = mergeToSequence + mergeSpecials["services.*.build"] = mergeBuild + mergeSpecials["services.*.build.args"] = mergeToSequence + mergeSpecials["services.*.build.additional_contexts"] = mergeToSequence + mergeSpecials["services.*.build.extra_hosts"] = mergeExtraHosts + mergeSpecials["services.*.build.labels"] = mergeToSequence + mergeSpecials["services.*.command"] = override + mergeSpecials["services.*.depends_on"] = mergeDependsOn + mergeSpecials["services.*.deploy.labels"] = mergeToSequence + mergeSpecials["services.*.dns"] = mergeToSequence + mergeSpecials["services.*.dns_opt"] = mergeToSequence + mergeSpecials["services.*.dns_search"] = mergeToSequence + mergeSpecials["services.*.entrypoint"] = override + mergeSpecials["services.*.env_file"] = mergeToSequence + mergeSpecials["services.*.label_file"] = mergeToSequence + mergeSpecials["services.*.environment"] = mergeToSequence + mergeSpecials["services.*.extra_hosts"] = mergeExtraHosts + mergeSpecials["services.*.healthcheck.test"] = override + mergeSpecials["services.*.labels"] = mergeToSequence + mergeSpecials["services.*.volumes.*.volume.labels"] = mergeToSequence + mergeSpecials["services.*.logging"] = mergeLogging + 
mergeSpecials["services.*.models"] = mergeModels + mergeSpecials["services.*.networks"] = mergeNetworks + mergeSpecials["services.*.sysctls"] = mergeToSequence + mergeSpecials["services.*.tmpfs"] = mergeToSequence + mergeSpecials["services.*.ulimits.*"] = mergeUlimit +} + +// MergeYaml merges map[string]any yaml trees handling special rules +func MergeYaml(e any, o any, p tree.Path) (any, error) { + for pattern, merger := range mergeSpecials { + if p.Matches(pattern) { + merged, err := merger(e, o, p) + if err != nil { + return nil, err + } + return merged, nil + } + } + if o == nil { + return e, nil + } + switch value := e.(type) { + case map[string]any: + other, ok := o.(map[string]any) + if !ok { + return nil, fmt.Errorf("cannot override %s", p) + } + return mergeMappings(value, other, p) + case []any: + other, ok := o.([]any) + if !ok { + return nil, fmt.Errorf("cannot override %s", p) + } + return append(value, other...), nil + default: + return o, nil + } +} + +func mergeMappings(mapping map[string]any, other map[string]any, p tree.Path) (map[string]any, error) { + for k, v := range other { + e, ok := mapping[k] + if !ok { + mapping[k] = v + continue + } + next := p.Next(k) + merged, err := MergeYaml(e, v, next) + if err != nil { + return nil, err + } + mapping[k] = merged + } + return mapping, nil +} + +// logging driver options are merged only when both compose file define the same driver +func mergeLogging(c any, o any, p tree.Path) (any, error) { + config := c.(map[string]any) + other := o.(map[string]any) + // we override logging config if source and override have the same driver set, or none + d, ok1 := other["driver"] + o, ok2 := config["driver"] + if d == o || !ok1 || !ok2 { + return mergeMappings(config, other, p) + } + return other, nil +} + +func mergeBuild(c any, o any, path tree.Path) (any, error) { + toBuild := func(c any) map[string]any { + switch v := c.(type) { + case string: + return map[string]any{ + "context": v, + } + case map[string]any: 
+ return v + } + return nil + } + return mergeMappings(toBuild(c), toBuild(o), path) +} + +func mergeDependsOn(c any, o any, path tree.Path) (any, error) { + right := convertIntoMapping(c, map[string]any{ + "condition": "service_started", + "required": true, + }) + left := convertIntoMapping(o, map[string]any{ + "condition": "service_started", + "required": true, + }) + return mergeMappings(right, left, path) +} + +func mergeModels(c any, o any, path tree.Path) (any, error) { + right := convertIntoMapping(c, nil) + left := convertIntoMapping(o, nil) + return mergeMappings(right, left, path) +} + +func mergeNetworks(c any, o any, path tree.Path) (any, error) { + right := convertIntoMapping(c, nil) + left := convertIntoMapping(o, nil) + return mergeMappings(right, left, path) +} + +func mergeExtraHosts(c any, o any, _ tree.Path) (any, error) { + right := convertIntoSequence(c) + left := convertIntoSequence(o) + // Rewrite content of left slice to remove duplicate elements + i := 0 + for _, v := range left { + if !slices.Contains(right, v) { + left[i] = v + i++ + } + } + // keep only not duplicated elements from left slice + left = left[:i] + return append(right, left...), nil +} + +func mergeToSequence(c any, o any, _ tree.Path) (any, error) { + right := convertIntoSequence(c) + left := convertIntoSequence(o) + return append(right, left...), nil +} + +func convertIntoSequence(value any) []any { + switch v := value.(type) { + case map[string]any: + var seq []any + for k, val := range v { + if val == nil { + seq = append(seq, k) + } else { + switch vl := val.(type) { + // if val is an array we need to add the key with each value one by one + case []any: + for _, vlv := range vl { + seq = append(seq, fmt.Sprintf("%s=%v", k, vlv)) + } + default: + seq = append(seq, fmt.Sprintf("%s=%v", k, val)) + } + } + } + slices.SortFunc(seq, func(a, b any) int { + return cmp.Compare(a.(string), b.(string)) + }) + return seq + case []any: + return v + case string: + return []any{v} + 
} + return nil +} + +func mergeUlimit(_ any, o any, p tree.Path) (any, error) { + over, ismapping := o.(map[string]any) + if base, ok := o.(map[string]any); ok && ismapping { + return mergeMappings(base, over, p) + } + return o, nil +} + +func mergeIPAMConfig(c any, o any, path tree.Path) (any, error) { + var ipamConfigs []any + configs, ok := c.([]any) + if !ok { + return o, fmt.Errorf("%s: unexpected type %T", path, c) + } + overrides, ok := o.([]any) + if !ok { + return o, fmt.Errorf("%s: unexpected type %T", path, c) + } + for _, original := range configs { + right := convertIntoMapping(original, nil) + for _, override := range overrides { + left := convertIntoMapping(override, nil) + if left["subnet"] != right["subnet"] { + // check if left is already in ipamConfigs, add it if not and continue with the next config + if !slices.ContainsFunc(ipamConfigs, func(a any) bool { + return a.(map[string]any)["subnet"] == left["subnet"] + }) { + ipamConfigs = append(ipamConfigs, left) + continue + } + } + merged, err := mergeMappings(right, left, path) + if err != nil { + return nil, err + } + // find index of potential previous config with the same subnet in ipamConfigs + indexIfExist := slices.IndexFunc(ipamConfigs, func(a any) bool { + return a.(map[string]any)["subnet"] == merged["subnet"] + }) + // if a previous config is already in ipamConfigs, replace it + if indexIfExist >= 0 { + ipamConfigs[indexIfExist] = merged + } else { + // or add the new config to ipamConfigs + ipamConfigs = append(ipamConfigs, merged) + } + } + } + return ipamConfigs, nil +} + +func convertIntoMapping(a any, defaultValue map[string]any) map[string]any { + switch v := a.(type) { + case map[string]any: + return v + case []any: + converted := map[string]any{} + for _, s := range v { + if defaultValue == nil { + converted[s.(string)] = nil + } else { + // Create a new map for each key + converted[s.(string)] = copyMap(defaultValue) + } + } + return converted + } + return nil +} + +func 
copyMap(m map[string]any) map[string]any { + c := make(map[string]any) + for k, v := range m { + c[k] = v + } + return c +} + +func override(_ any, other any, _ tree.Path) (any, error) { + return other, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go b/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go new file mode 100644 index 00000000..3b0c63d3 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/override/uncity.go @@ -0,0 +1,229 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package override + +import ( + "fmt" + "strconv" + "strings" + + "github.com/compose-spec/compose-go/v2/format" + "github.com/compose-spec/compose-go/v2/tree" +) + +type indexer func(any, tree.Path) (string, error) + +// mergeSpecials defines the custom rules applied by compose when merging yaml trees +var unique = map[tree.Path]indexer{} + +func init() { + unique["networks.*.labels"] = keyValueIndexer + unique["networks.*.ipam.options"] = keyValueIndexer + unique["services.*.annotations"] = keyValueIndexer + unique["services.*.build.args"] = keyValueIndexer + unique["services.*.build.additional_contexts"] = keyValueIndexer + unique["services.*.build.platform"] = keyValueIndexer + unique["services.*.build.tags"] = keyValueIndexer + unique["services.*.build.labels"] = keyValueIndexer + unique["services.*.cap_add"] = keyValueIndexer + unique["services.*.cap_drop"] = keyValueIndexer + unique["services.*.devices"] = volumeIndexer + unique["services.*.configs"] = mountIndexer("") + unique["services.*.deploy.labels"] = keyValueIndexer + unique["services.*.dns"] = keyValueIndexer + unique["services.*.dns_opt"] = keyValueIndexer + unique["services.*.dns_search"] = keyValueIndexer + unique["services.*.environment"] = keyValueIndexer + unique["services.*.env_file"] = envFileIndexer + unique["services.*.expose"] = exposeIndexer + unique["services.*.labels"] = keyValueIndexer + unique["services.*.links"] = keyValueIndexer + unique["services.*.networks.*.aliases"] = keyValueIndexer + unique["services.*.networks.*.link_local_ips"] = keyValueIndexer + unique["services.*.ports"] = portIndexer + unique["services.*.profiles"] = keyValueIndexer + unique["services.*.secrets"] = mountIndexer("/run/secrets") + unique["services.*.sysctls"] = keyValueIndexer + unique["services.*.tmpfs"] = keyValueIndexer + unique["services.*.volumes"] = volumeIndexer + unique["services.*.devices"] = deviceMappingIndexer +} + +// EnforceUnicity removes redefinition of elements declared in a sequence 
+func EnforceUnicity(value map[string]any) (map[string]any, error) { + uniq, err := enforceUnicity(value, tree.NewPath()) + if err != nil { + return nil, err + } + return uniq.(map[string]any), nil +} + +func enforceUnicity(value any, p tree.Path) (any, error) { + switch v := value.(type) { + case map[string]any: + for k, e := range v { + u, err := enforceUnicity(e, p.Next(k)) + if err != nil { + return nil, err + } + v[k] = u + } + return v, nil + case []any: + for pattern, indexer := range unique { + if p.Matches(pattern) { + seq := []any{} + keys := map[string]int{} + for i, entry := range v { + key, err := indexer(entry, p.Next(fmt.Sprintf("[%d]", i))) + if err != nil { + return nil, err + } + if j, ok := keys[key]; ok { + seq[j] = entry + } else { + seq = append(seq, entry) + keys[key] = len(seq) - 1 + } + } + return seq, nil + } + } + } + return value, nil +} + +func keyValueIndexer(v any, p tree.Path) (string, error) { + switch value := v.(type) { + case string: + key, _, found := strings.Cut(value, "=") + if found { + return key, nil + } + return value, nil + default: + return "", fmt.Errorf("%s: unexpected type %T", p, v) + } +} + +func volumeIndexer(y any, p tree.Path) (string, error) { + switch value := y.(type) { + case map[string]any: + target, ok := value["target"].(string) + if !ok { + return "", fmt.Errorf("service volume %s is missing a mount target", p) + } + return target, nil + case string: + volume, err := format.ParseVolume(value) + if err != nil { + return "", err + } + return volume.Target, nil + } + return "", nil +} + +func deviceMappingIndexer(y any, p tree.Path) (string, error) { + switch value := y.(type) { + case map[string]any: + target, ok := value["target"].(string) + if !ok { + return "", fmt.Errorf("service device %s is missing a mount target", p) + } + return target, nil + case string: + arr := strings.Split(value, ":") + if len(arr) == 1 { + return arr[0], nil + } + return arr[1], nil + } + return "", nil +} + +func 
exposeIndexer(a any, path tree.Path) (string, error) { + switch v := a.(type) { + case string: + return v, nil + case int: + return strconv.Itoa(v), nil + default: + return "", fmt.Errorf("%s: unsupported expose value %s", path, a) + } +} + +func mountIndexer(defaultPath string) indexer { + return func(a any, path tree.Path) (string, error) { + switch v := a.(type) { + case string: + return fmt.Sprintf("%s/%s", defaultPath, v), nil + case map[string]any: + t, ok := v["target"] + if ok { + return t.(string), nil + } + return fmt.Sprintf("%s/%s", defaultPath, v["source"]), nil + default: + return "", fmt.Errorf("%s: unsupported expose value %s", path, a) + } + } +} + +func portIndexer(y any, p tree.Path) (string, error) { + switch value := y.(type) { + case int: + return strconv.Itoa(value), nil + case map[string]any: + target, ok := value["target"] + if !ok { + return "", fmt.Errorf("service ports %s is missing a target port", p) + } + published, ok := value["published"] + if !ok { + // try to parse it as an int + if pub, ok := value["published"]; ok { + published = fmt.Sprintf("%d", pub) + } + } + host, ok := value["host_ip"] + if !ok { + host = "0.0.0.0" + } + protocol, ok := value["protocol"] + if !ok { + protocol = "tcp" + } + return fmt.Sprintf("%s:%s:%d/%s", host, published, target, protocol), nil + case string: + return value, nil + } + return "", nil +} + +func envFileIndexer(y any, p tree.Path) (string, error) { + switch value := y.(type) { + case string: + return value, nil + case map[string]any: + if pathValue, ok := value["path"]; ok { + return pathValue.(string), nil + } + return "", fmt.Errorf("environment path attribute %s is missing", p) + } + return "", nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/context.go b/vendor/github.com/compose-spec/compose-go/v2/paths/context.go new file mode 100644 index 00000000..ceab7d67 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/context.go @@ -0,0 +1,51 @@ +/* + 
Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package paths + +import ( + "strings" + + "github.com/compose-spec/compose-go/v2/types" +) + +func (r *relativePathsResolver) absContextPath(value any) (any, error) { + v := value.(string) + if strings.Contains(v, "://") { // `docker-image://` or any builder specific context type + return v, nil + } + if strings.HasPrefix(v, types.ServicePrefix) { // `docker-image://` or any builder specific context type + return v, nil + } + if isRemoteContext(v) { + return v, nil + } + return r.absPath(v) +} + +// isRemoteContext returns true if the value is a Git reference or HTTP(S) URL. +// +// Any other value is assumed to be a local filesystem path and returns false. +// +// See: https://github.com/moby/buildkit/blob/18fc875d9bfd6e065cd8211abc639434ba65aa56/frontend/dockerui/context.go#L76-L79 +func isRemoteContext(maybeURL string) bool { + for _, prefix := range []string{"https://", "http://", "git://", "ssh://", "github.com/", "git@"} { + if strings.HasPrefix(maybeURL, prefix) { + return true + } + } + return false +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go b/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go new file mode 100644 index 00000000..aa61a9f9 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/extends.go @@ -0,0 +1,25 @@ +/* + Copyright 2020 The Compose Specification Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package paths + +func (r *relativePathsResolver) absExtendsPath(value any) (any, error) { + v := value.(string) + if r.isRemoteResource(v) { + return v, nil + } + return r.absPath(v) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/home.go b/vendor/github.com/compose-spec/compose-go/v2/paths/home.go new file mode 100644 index 00000000..a5579262 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/home.go @@ -0,0 +1,37 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package paths + +import ( + "os" + "path/filepath" + "strings" + + "github.com/sirupsen/logrus" +) + +func ExpandUser(p string) string { + if strings.HasPrefix(p, "~") { + home, err := os.UserHomeDir() + if err != nil { + logrus.Warn("cannot expand '~', because the environment lacks HOME") + return p + } + return filepath.Join(home, p[1:]) + } + return p +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go b/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go new file mode 100644 index 00000000..c58cb410 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/paths/resolve.go @@ -0,0 +1,169 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
// resolver converts a single value extracted from the project model
// (typically a path string) into its resolved form.
type resolver func(any) (any, error)

// ResolveRelativePaths make relative paths absolute
func ResolveRelativePaths(project map[string]any, base string, remotes []RemoteResource) error {
	r := relativePathsResolver{
		workingDir: base,
		remotes:    remotes,
	}
	// Each tree-path pattern below is rewritten by its paired resolver when a
	// matching node is reached while walking the raw project model.
	r.resolvers = map[tree.Path]resolver{
		"services.*.build.context":               r.absContextPath,
		"services.*.build.additional_contexts.*": r.absContextPath,
		"services.*.build.ssh.*":                 r.maybeUnixPath,
		"services.*.env_file.*.path":             r.absPath,
		"services.*.label_file.*":                r.absPath,
		"services.*.extends.file":                r.absExtendsPath,
		"services.*.develop.watch.*.path":        r.absSymbolicLink,
		"services.*.volumes.*":                   r.absVolumeMount,
		"configs.*.file":                         r.maybeUnixPath,
		"secrets.*.file":                         r.maybeUnixPath,
		"include.path":                           r.absPath,
		"include.project_directory":              r.absPath,
		"include.env_file":                       r.absPath,
		"volumes.*":                              r.volumeDriverOpts,
	}
	_, err := r.resolveRelativePaths(project, tree.NewPath())
	return err
}

// RemoteResource reports whether path refers to a remote resource that must
// not be rewritten into a local absolute path.
type RemoteResource func(path string) bool

// relativePathsResolver walks a raw project model and rewrites relative
// paths against workingDir.
type relativePathsResolver struct {
	workingDir string // base directory relative paths are resolved against
	remotes    []RemoteResource
	resolvers  map[tree.Path]resolver
}

// isRemoteResource reports whether any registered RemoteResource matcher
// recognizes path as remote.
func (r *relativePathsResolver) isRemoteResource(path string) bool {
	for _, remote := range r.remotes {
		if remote(path) {
			return true
		}
	}
	return false
}

// resolveRelativePaths recursively walks value at tree position p. When p
// matches a registered pattern, the paired resolver rewrites the value and
// recursion stops there; otherwise maps and sequences are traversed and
// updated in place.
func (r *relativePathsResolver) resolveRelativePaths(value any, p tree.Path) (any, error) {
	for pattern, resolver := range r.resolvers {
		if p.Matches(pattern) {
			return resolver(value)
		}
	}
	switch v := value.(type) {
	case map[string]any:
		for k, e := range v {
			resolved, err := r.resolveRelativePaths(e, p.Next(k))
			if err != nil {
				return nil, err
			}
			v[k] = resolved
		}
	case []any:
		// "[]" is the path segment convention for sequence elements.
		for i, e := range v {
			resolved, err := r.resolveRelativePaths(e, p.Next("[]"))
			if err != nil {
				return nil, err
			}
			v[i] = resolved
		}
	}
	return value, nil
}

// absPath makes a path string (or each element of a slice of paths) absolute
// against workingDir, after expanding a leading "~". Already-absolute and
// empty strings are returned unchanged; any other type is an error.
func (r *relativePathsResolver) absPath(value any) (any, error) {
	switch v := value.(type) {
	case []any:
		for i, s := range v {
			abs, err := r.absPath(s)
			if err != nil {
				return nil, err
			}
			v[i] = abs
		}
		return v, nil
	case string:
		v = ExpandUser(v)
		if filepath.IsAbs(v) {
			return v, nil
		}
		if v != "" {
			return filepath.Join(r.workingDir, v), nil
		}
		return v, nil
	}

	return nil, fmt.Errorf("unexpected type %T", value)
}

// absVolumeMount resolves the source of a canonical (mapping-form) bind
// mount. Non-bind mount types and non-canonical (string-form) values are
// returned unchanged.
func (r *relativePathsResolver) absVolumeMount(a any) (any, error) {
	switch vol := a.(type) {
	case map[string]any:
		if vol["type"] != types.VolumeTypeBind {
			return vol, nil
		}
		src, ok := vol["source"]
		if !ok {
			return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`)
		}
		abs, err := r.maybeUnixPath(src.(string))
		if err != nil {
			return nil, err
		}
		vol["source"] = abs
		return vol, nil
	default:
		// not using canonical format, skip
		return a, nil
	}
}

// volumeDriverOpts resolves the device path of a "local"-driver volume whose
// driver_opts declare `o: bind` — i.e. a bind mount expressed as a named
// volume. All other volumes pass through untouched.
func (r *relativePathsResolver) volumeDriverOpts(a any) (any, error) {
	if a == nil {
		return nil, nil
	}
	vol := a.(map[string]any)
	if vol["driver"] != "local" {
		return vol, nil
	}
	do, ok := vol["driver_opts"]
	if !ok {
		return vol, nil
	}
	opts := do.(map[string]any)
	if dev, ok := opts["device"]; opts["o"] == "bind" && ok {
		// This is actually a bind mount
		path, err := r.maybeUnixPath(dev)
		if err != nil {
			return nil, err
		}
		opts["device"] = path
	}
	return vol, nil
}
// maybeUnixPath resolves a value against workingDir when it is a relative
// path string (after "~" expansion). Non-string values and paths that are
// already absolute — in either Unix or Windows form — are returned unchanged.
func (r *relativePathsResolver) maybeUnixPath(a any) (any, error) {
	p, ok := a.(string)
	if !ok {
		return a, nil
	}
	p = ExpandUser(p)
	// Check if source is an absolute path (either Unix or Windows), to
	// handle a Windows client with a Unix daemon or vice-versa.
	//
	// Note that this is not required for Docker for Windows when specifying
	// a local Windows path, because Docker for Windows translates the Windows
	// path into a valid path within the VM.
	if !path.IsAbs(p) && !IsWindowsAbs(p) {
		if filepath.IsAbs(p) {
			return p, nil
		}
		return filepath.Join(r.workingDir, p), nil
	}
	return p, nil
}

// absSymbolicLink makes value absolute (via absPath) and then resolves any
// symbolic link in the result. Non-string results pass through as-is.
func (r *relativePathsResolver) absSymbolicLink(value any) (any, error) {
	abs, err := r.absPath(value)
	if err != nil {
		return nil, err
	}
	str, ok := abs.(string)
	if !ok {
		return abs, nil
	}
	return utils.ResolveSymbolicLink(str)
}
// IsPathSeparator reports whether c is a Windows path separator; both the
// backslash and the forward slash are accepted.
func IsPathSeparator(c uint8) bool {
	return c == '\\' || c == '/'
}

// IsWindowsAbs reports whether the path is absolute.
// copied from IsAbs(path string) (b bool) from internal.filetpathlite
func IsWindowsAbs(path string) (b bool) {
	l := volumeNameLen(path)
	if l == 0 {
		return false
	}
	// If the volume name starts with a double slash, this is an absolute path.
	// Note: l > 0 guarantees len(path) >= 2 here, so indexing is safe.
	if IsPathSeparator(path[0]) && IsPathSeparator(path[1]) {
		return true
	}
	path = path[l:]
	if path == "" {
		return false
	}
	return IsPathSeparator(path[0])
}

// volumeNameLen returns length of the leading volume name on Windows.
// It returns 0 elsewhere.
//
// See:
// https://learn.microsoft.com/en-us/dotnet/standard/io/file-path-formats
// https://googleprojectzero.blogspot.com/2016/02/the-definitive-guide-on-win32-to-nt.html
func volumeNameLen(path string) int {
	switch {
	case len(path) >= 2 && path[1] == ':':
		// Path starts with a drive letter.
		//
		// Not all Windows functions necessarily enforce the requirement that
		// drive letters be in the set A-Z, and we don't try to here.
		//
		// We don't handle the case of a path starting with a non-ASCII character,
		// in which case the "drive letter" might be multiple bytes long.
		return 2

	case len(path) == 0 || !IsPathSeparator(path[0]):
		// Path does not have a volume component.
		return 0

	case pathHasPrefixFold(path, `\\.\UNC`):
		// We're going to treat the UNC host and share as part of the volume
		// prefix for historical reasons, but this isn't really principled;
		// Windows's own GetFullPathName will happily remove the first
		// component of the path in this space, converting
		// \\.\unc\a\b\..\c into \\.\unc\a\c.
		return uncLen(path, len(`\\.\UNC\`))

	case pathHasPrefixFold(path, `\\.`) ||
		pathHasPrefixFold(path, `\\?`) || pathHasPrefixFold(path, `\??`):
		// Path starts with \\.\, and is a Local Device path; or
		// path starts with \\?\ or \??\ and is a Root Local Device path.
		//
		// We treat the next component after the \\.\ prefix as
		// part of the volume name, which means Clean(`\\?\c:\`)
		// won't remove the trailing \. (See #64028.)
		if len(path) == 3 {
			return 3 // exactly \\.
		}
		_, rest, ok := cutPath(path[4:])
		if !ok {
			return len(path)
		}
		return len(path) - len(rest) - 1

	case len(path) >= 2 && IsPathSeparator(path[1]):
		// Path starts with \\, and is a UNC path.
		return uncLen(path, 2)
	}
	return 0
}
+// If s is longer than prefix, then s[len(prefix)] must be a path separator. +func pathHasPrefixFold(s, prefix string) bool { + if len(s) < len(prefix) { + return false + } + for i := 0; i < len(prefix); i++ { + if IsPathSeparator(prefix[i]) { + if !IsPathSeparator(s[i]) { + return false + } + } else if toUpper(prefix[i]) != toUpper(s[i]) { + return false + } + } + if len(s) > len(prefix) && !IsPathSeparator(s[len(prefix)]) { + return false + } + return true +} + +// uncLen returns the length of the volume prefix of a UNC path. +// prefixLen is the prefix prior to the start of the UNC host; +// for example, for "//host/share", the prefixLen is len("//")==2. +func uncLen(path string, prefixLen int) int { + count := 0 + for i := prefixLen; i < len(path); i++ { + if IsPathSeparator(path[i]) { + count++ + if count == 2 { + return i + } + } + } + return len(path) +} + +// cutPath slices path around the first path separator. +func cutPath(path string) (before, after string, found bool) { + for i := range path { + if IsPathSeparator(path[i]) { + return path[:i], path[i+1:], true + } + } + return path, "", false +} + +// postClean adjusts the results of Clean to avoid turning a relative path +// into an absolute or rooted one. +func postClean(out *lazybuf) { + if out.volLen != 0 || out.buf == nil { + return + } + // If a ':' appears in the path element at the start of a path, + // insert a .\ at the beginning to avoid converting relative paths + // like a/../c: into c:. + for _, c := range out.buf { + if IsPathSeparator(c) { + break + } + if c == ':' { + out.prepend('.', Separator) + return + } + } + // If a path begins with \??\, insert a \. at the beginning + // to avoid converting paths like \a\..\??\c:\x into \??\c:\x + // (equivalent to c:\x). + if len(out.buf) >= 3 && IsPathSeparator(out.buf[0]) && out.buf[1] == '?' && out.buf[2] == '?' 
{ + out.prepend(Separator, '.') + } +} + +func toUpper(c byte) byte { + if 'a' <= c && c <= 'z' { + return c - ('a' - 'A') + } + return c +} + +const ( + Separator = '\\' // OS-specific path separator +) + +// A lazybuf is a lazily constructed path buffer. +// It supports append, reading previously appended bytes, +// and retrieving the final string. It does not allocate a buffer +// to hold the output until that output diverges from s. +type lazybuf struct { + path string + buf []byte + w int + volAndPath string + volLen int +} + +func (b *lazybuf) index(i int) byte { + if b.buf != nil { + return b.buf[i] + } + return b.path[i] +} + +func (b *lazybuf) append(c byte) { + if b.buf == nil { + if b.w < len(b.path) && b.path[b.w] == c { + b.w++ + return + } + b.buf = make([]byte, len(b.path)) + copy(b.buf, b.path[:b.w]) + } + b.buf[b.w] = c + b.w++ +} + +func (b *lazybuf) prepend(prefix ...byte) { + b.buf = slices.Insert(b.buf, 0, prefix...) + b.w += len(prefix) +} + +func (b *lazybuf) string() string { + if b.buf == nil { + return b.volAndPath[:b.volLen+b.w] + } + return b.volAndPath[:b.volLen] + string(b.buf[:b.w]) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json b/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json new file mode 100644 index 00000000..462de285 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/schema/compose-spec.json @@ -0,0 +1,1912 @@ +{ + "$schema": "https://json-schema.org/draft-07/schema", + "$id": "compose_spec.json", + "type": "object", + "title": "Compose Specification", + "description": "The Compose file is a YAML file defining a multi-containers based application.", + + "properties": { + "version": { + "type": "string", + "deprecated": true, + "description": "declared for backward compatibility, ignored. Please remove it." + }, + + "name": { + "type": "string", + "description": "define the Compose project name, until user defines one explicitly." 
+ }, + + "include": { + "type": "array", + "items": { + "$ref": "#/definitions/include" + }, + "description": "compose sub-projects to be included." + }, + + "services": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/service" + } + }, + "additionalProperties": false, + "description": "The services that will be used by your application." + }, + + "models": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/model" + } + }, + "description": "Language models that will be used by your application." + }, + + + "networks": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/network" + } + }, + "description": "Networks that are shared among multiple services." + }, + + "volumes": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/volume" + } + }, + "additionalProperties": false, + "description": "Named volumes that are shared among multiple services." + }, + + "secrets": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/secret" + } + }, + "additionalProperties": false, + "description": "Secrets that are shared among multiple services." + }, + + "configs": { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "$ref": "#/definitions/config" + } + }, + "additionalProperties": false, + "description": "Configurations that are shared among multiple services." 
+ } + }, + + "patternProperties": {"^x-": {}}, + "additionalProperties": false, + + "definitions": { + + "service": { + "type": "object", + "description": "Configuration for a service.", + "properties": { + "develop": {"$ref": "#/definitions/development"}, + "deploy": {"$ref": "#/definitions/deployment"}, + "annotations": {"$ref": "#/definitions/list_or_dict"}, + "attach": {"type": ["boolean", "string"]}, + "build": { + "description": "Configuration options for building the service's image.", + "oneOf": [ + {"type": "string", "description": "Path to the build context. Can be a relative path or a URL."}, + { + "type": "object", + "properties": { + "context": {"type": "string", "description": "Path to the build context. Can be a relative path or a URL."}, + "dockerfile": {"type": "string", "description": "Name of the Dockerfile to use for building the image."}, + "dockerfile_inline": {"type": "string", "description": "Inline Dockerfile content to use instead of a Dockerfile from the build context."}, + "entitlements": {"type": "array", "items": {"type": "string"}, "description": "List of extra privileged entitlements to grant to the build process."}, + "args": {"$ref": "#/definitions/list_or_dict", "description": "Build-time variables, specified as a map or a list of KEY=VAL pairs."}, + "ssh": {"$ref": "#/definitions/list_or_dict", "description": "SSH agent socket or keys to expose to the build. 
Format is either a string or a list of 'default|[=|[,]]'."}, + "labels": {"$ref": "#/definitions/list_or_dict", "description": "Labels to apply to the built image."}, + "cache_from": {"type": "array", "items": {"type": "string"}, "description": "List of sources the image builder should use for cache resolution"}, + "cache_to": {"type": "array", "items": {"type": "string"}, "description": "Cache destinations for the build cache."}, + "no_cache": {"type": ["boolean", "string"], "description": "Do not use cache when building the image."}, + "no_cache_filter": {"$ref": "#/definitions/string_or_list", "description": "Do not use build cache for the specified stages."}, + "additional_contexts": {"$ref": "#/definitions/list_or_dict", "description": "Additional build contexts to use, specified as a map of name to context path or URL."}, + "network": {"type": "string", "description": "Network mode to use for the build. Options include 'default', 'none', 'host', or a network name."}, + "provenance": {"type": ["string","boolean"], "description": "Add a provenance attestation"}, + "sbom": {"type": ["string","boolean"], "description": "Add a SBOM attestation"}, + "pull": {"type": ["boolean", "string"], "description": "Always attempt to pull a newer version of the image."}, + "target": {"type": "string", "description": "Build stage to target in a multi-stage Dockerfile."}, + "shm_size": {"type": ["integer", "string"], "description": "Size of /dev/shm for the build container. 
A string value can use suffix like '2g' for 2 gigabytes."}, + "extra_hosts": {"$ref": "#/definitions/extra_hosts", "description": "Add hostname mappings for the build container."}, + "isolation": {"type": "string", "description": "Container isolation technology to use for the build process."}, + "privileged": {"type": ["boolean", "string"], "description": "Give extended privileges to the build container."}, + "secrets": {"$ref": "#/definitions/service_config_or_secret", "description": "Secrets to expose to the build. These are accessible at build-time."}, + "tags": {"type": "array", "items": {"type": "string"}, "description": "Additional tags to apply to the built image."}, + "ulimits": {"$ref": "#/definitions/ulimits", "description": "Override the default ulimits for the build container."}, + "platforms": {"type": "array", "items": {"type": "string"}, "description": "Platforms to build for, e.g., 'linux/amd64', 'linux/arm64', or 'windows/amd64'."} + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + "blkio_config": { + "type": "object", + "description": "Block IO configuration for the service.", + "properties": { + "device_read_bps": { + "type": "array", + "description": "Limit read rate (bytes per second) from a device.", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "device_read_iops": { + "type": "array", + "description": "Limit read rate (IO per second) from a device.", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "device_write_bps": { + "type": "array", + "description": "Limit write rate (bytes per second) to a device.", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "device_write_iops": { + "type": "array", + "description": "Limit write rate (IO per second) to a device.", + "items": {"$ref": "#/definitions/blkio_limit"} + }, + "weight": { + "type": ["integer", "string"], + "description": "Block IO weight (relative weight) for the service, between 10 and 1000." 
+ }, + "weight_device": { + "type": "array", + "description": "Block IO weight (relative weight) for specific devices.", + "items": {"$ref": "#/definitions/blkio_weight"} + } + }, + "additionalProperties": false + }, + "cap_add": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true, + "description": "Add Linux capabilities. For example, 'CAP_SYS_ADMIN', 'SYS_ADMIN', or 'NET_ADMIN'." + }, + "cap_drop": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true, + "description": "Drop Linux capabilities. For example, 'CAP_SYS_ADMIN', 'SYS_ADMIN', or 'NET_ADMIN'." + }, + "cgroup": { + "type": "string", + "enum": ["host", "private"], + "description": "Specify the cgroup namespace to join. Use 'host' to use the host's cgroup namespace, or 'private' to use a private cgroup namespace." + }, + "cgroup_parent": { + "type": "string", + "description": "Specify an optional parent cgroup for the container." + }, + "command": { + "$ref": "#/definitions/command", + "description": "Override the default command declared by the container image, for example 'CMD' in Dockerfile." + }, + "configs": { + "$ref": "#/definitions/service_config_or_secret", + "description": "Grant access to Configs on a per-service basis." + }, + "container_name": { + "type": "string", + "description": "Specify a custom container name, rather than a generated default name.", + "pattern": "[a-zA-Z0-9][a-zA-Z0-9_.-]+" + }, + "cpu_count": { + "oneOf": [ + {"type": "string"}, + {"type": "integer", "minimum": 0} + ], + "description": "Number of usable CPUs." + }, + "cpu_percent": { + "oneOf": [ + {"type": "string"}, + {"type": "integer", "minimum": 0, "maximum": 100} + ], + "description": "Percentage of CPU resources to use." + }, + "cpu_shares": { + "type": ["number", "string"], + "description": "CPU shares (relative weight) for the container." + }, + "cpu_quota": { + "type": ["number", "string"], + "description": "Limit the CPU CFS (Completely Fair Scheduler) quota." 
+ }, + "cpu_period": { + "type": ["number", "string"], + "description": "Limit the CPU CFS (Completely Fair Scheduler) period." + }, + "cpu_rt_period": { + "type": ["number", "string"], + "description": "Limit the CPU real-time period in microseconds or a duration." + }, + "cpu_rt_runtime": { + "type": ["number", "string"], + "description": "Limit the CPU real-time runtime in microseconds or a duration." + }, + "cpus": { + "type": ["number", "string"], + "description": "Number of CPUs to use. A floating-point value is supported to request partial CPUs." + }, + "cpuset": { + "type": "string", + "description": "CPUs in which to allow execution (0-3, 0,1)." + }, + "credential_spec": { + "type": "object", + "description": "Configure the credential spec for managed service account.", + "properties": { + "config": { + "type": "string", + "description": "The name of the credential spec Config to use." + }, + "file": { + "type": "string", + "description": "Path to a credential spec file." + }, + "registry": { + "type": "string", + "description": "Path to a credential spec in the Windows registry." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "depends_on": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "additionalProperties": false, + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "type": "object", + "additionalProperties": false, + "patternProperties": {"^x-": {}}, + "properties": { + "restart": { + "type": ["boolean", "string"], + "description": "Whether to restart dependent services when this service is restarted." + }, + "required": { + "type": "boolean", + "default": true, + "description": "Whether the dependency is required for the dependent service to start." + }, + "condition": { + "type": "string", + "enum": ["service_started", "service_healthy", "service_completed_successfully"], + "description": "Condition to wait for. 
'service_started' waits until the service has started, 'service_healthy' waits until the service is healthy (as defined by its healthcheck), 'service_completed_successfully' waits until the service has completed successfully." + } + }, + "required": ["condition"] + } + } + } + ], + "description": "Express dependency between services. Service dependencies cause services to be started in dependency order. The dependent service will wait for the dependency to be ready before starting." + }, + "device_cgroup_rules": { + "$ref": "#/definitions/list_of_strings", + "description": "Add rules to the cgroup allowed devices list." + }, + "devices": { + "type": "array", + "description": "List of device mappings for the container.", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "required": ["source"], + "properties": { + "source": { + "type": "string", + "description": "Path on the host to the device." + }, + "target": { + "type": "string", + "description": "Path in the container where the device will be mapped." + }, + "permissions": { + "type": "string", + "description": "Cgroup permissions for the device (rwm)." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + } + }, + "dns": { + "$ref": "#/definitions/string_or_list", + "description": "Custom DNS servers to set for the service container." + }, + "dns_opt": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true, + "description": "Custom DNS options to be passed to the container's DNS resolver." + }, + "dns_search": { + "$ref": "#/definitions/string_or_list", + "description": "Custom DNS search domains to set on the service container." + }, + "domainname": { + "type": "string", + "description": "Custom domain name to use for the service container." + }, + "entrypoint": { + "$ref": "#/definitions/command", + "description": "Override the default entrypoint declared by the container image, for example 'ENTRYPOINT' in Dockerfile." 
+ }, + "env_file": { + "$ref": "#/definitions/env_file", + "description": "Add environment variables from a file or multiple files. Can be a single file path or a list of file paths." + }, + "label_file": { + "$ref": "#/definitions/label_file", + "description": "Add metadata to containers using files containing Docker labels." + }, + "environment": { + "$ref": "#/definitions/list_or_dict", + "description": "Add environment variables. You can use either an array or a list of KEY=VAL pairs." + }, + "expose": { + "type": "array", + "items": { + "type": ["string", "number"] + }, + "uniqueItems": true, + "description": "Expose ports without publishing them to the host machine - they'll only be accessible to linked services." + }, + "extends": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "service": { + "type": "string", + "description": "The name of the service to extend." + }, + "file": { + "type": "string", + "description": "The file path where the service to extend is defined." + } + }, + "required": ["service"], + "additionalProperties": false + } + ], + "description": "Extend another service, in the current file or another file." + }, + "provider": { + "type": "object", + "description": "Specify a service which will not be manage by Compose directly, and delegate its management to an external provider.", + "required": ["type"], + "properties": { + "type": { + "type": "string", + "description": "External component used by Compose to manage setup and teardown lifecycle of the service." 
+ }, + "options": { + "type": "object", + "description": "Provider-specific options.", + "patternProperties": { + "^.+$": {"oneOf": [ + { "type": ["string", "number", "boolean"] }, + { "type": "array", "items": {"type": ["string", "number", "boolean"]}} + ]} + } + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "external_links": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true, + "description": "Link to services started outside this Compose application. Specify services as :." + }, + "extra_hosts": { + "$ref": "#/definitions/extra_hosts", + "description": "Add hostname mappings to the container network interface configuration." + }, + "gpus": { + "$ref": "#/definitions/gpus", + "description": "Define GPU devices to use. Can be set to 'all' to use all GPUs, or a list of specific GPU devices." + }, + "group_add": { + "type": "array", + "items": { + "type": ["string", "number"] + }, + "uniqueItems": true, + "description": "Add additional groups which user inside the container should be member of." + }, + "healthcheck": { + "$ref": "#/definitions/healthcheck", + "description": "Configure a health check for the container to monitor its health status." + }, + "hostname": { + "type": "string", + "description": "Define a custom hostname for the service container." + }, + "image": { + "type": "string", + "description": "Specify the image to start the container from. Can be a repository/tag, a digest, or a local image ID." + }, + "init": { + "type": ["boolean", "string"], + "description": "Run as an init process inside the container that forwards signals and reaps processes." + }, + "ipc": { + "type": "string", + "description": "IPC sharing mode for the service container. Use 'host' to share the host's IPC namespace, 'service:[service_name]' to share with another service, or 'shareable' to allow other services to share this service's IPC namespace." 
+ }, + "isolation": { + "type": "string", + "description": "Container isolation technology to use. Supported values are platform-specific." + }, + "labels": { + "$ref": "#/definitions/list_or_dict", + "description": "Add metadata to containers using Docker labels. You can use either an array or a list." + }, + "links": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true, + "description": "Link to containers in another service. Either specify both the service name and a link alias (SERVICE:ALIAS), or just the service name." + }, + "logging": { + "type": "object", + "description": "Logging configuration for the service.", + "properties": { + "driver": { + "type": "string", + "description": "Logging driver to use, such as 'json-file', 'syslog', 'journald', etc." + }, + "options": { + "type": "object", + "description": "Options for the logging driver.", + "patternProperties": { + "^.+$": {"type": ["string", "number", "null"]} + } + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "mac_address": { + "type": "string", + "description": "Container MAC address to set." + }, + "mem_limit": { + "type": ["number", "string"], + "description": "Memory limit for the container. A string value can use suffix like '2g' for 2 gigabytes." + }, + "mem_reservation": { + "type": ["string", "integer"], + "description": "Memory reservation for the container." + }, + "mem_swappiness": { + "type": ["integer", "string"], + "description": "Container memory swappiness as percentage (0 to 100)." + }, + "memswap_limit": { + "type": ["number", "string"], + "description": "Amount of memory the container is allowed to swap to disk. Set to -1 to enable unlimited swap." + }, + "network_mode": { + "type": "string", + "description": "Network mode. Values can be 'bridge', 'host', 'none', 'service:[service name]', or 'container:[container name]'." 
+ }, + "models": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + {"type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "endpoint_var": { + "type": "string", + "description": "Environment variable set to AI model endpoint." + }, + "model_var": { + "type": "string", + "description": "Environment variable set to AI model name." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + {"type": "null"} + ] + } + } + } + ], + "description": "AI Models to use, referencing entries under the top-level models key." + }, + "networks": { + "oneOf": [ + {"$ref": "#/definitions/list_of_strings"}, + { + "type": "object", + "patternProperties": { + "^[a-zA-Z0-9._-]+$": { + "oneOf": [ + { + "type": "object", + "properties": { + "aliases": { + "$ref": "#/definitions/list_of_strings", + "description": "Alternative hostnames for this service on the network." + }, + "interface_name": { + "type": "string", + "description": "Interface network name used to connect to network" + }, + "ipv4_address": { + "type": "string", + "description": "Specify a static IPv4 address for this service on this network." + }, + "ipv6_address": { + "type": "string", + "description": "Specify a static IPv6 address for this service on this network." + }, + "link_local_ips": { + "$ref": "#/definitions/list_of_strings", + "description": "List of link-local IPs." + }, + "mac_address": { + "type": "string", + "description": "Specify a MAC address for this service on this network." + }, + "driver_opts": { + "type": "object", + "description": "Driver options for this network.", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "priority": { + "type": "number", + "description": "Specify the priority for the network connection." + }, + "gw_priority": { + "type": "number", + "description": "Specify the gateway priority for the network connection." 
+ } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + {"type": "null"} + ] + } + }, + "additionalProperties": false + } + ], + "description": "Networks to join, referencing entries under the top-level networks key. Can be a list of network names or a mapping of network name to network configuration." + }, + "oom_kill_disable": { + "type": ["boolean", "string"], + "description": "Disable OOM Killer for the container." + }, + "oom_score_adj": { + "oneOf": [ + {"type": "string"}, + {"type": "integer", "minimum": -1000, "maximum": 1000} + ], + "description": "Tune host's OOM preferences for the container (accepts -1000 to 1000)." + }, + "pid": { + "type": ["string", "null"], + "description": "PID mode for container." + }, + "pids_limit": { + "type": ["number", "string"], + "description": "Tune a container's PIDs limit. Set to -1 for unlimited PIDs." + }, + "platform": { + "type": "string", + "description": "Target platform to run on, e.g., 'linux/amd64', 'linux/arm64', or 'windows/amd64'." + }, + "ports": { + "type": "array", + "description": "Expose container ports. Short format ([HOST:]CONTAINER[/PROTOCOL]).", + "items": { + "oneOf": [ + {"type": "number"}, + {"type": "string"}, + { + "type": "object", + "properties": { + "name": { + "type": "string", + "description": "A human-readable name for this port mapping." + }, + "mode": { + "type": "string", + "description": "The port binding mode, either 'host' for publishing a host port or 'ingress' for load balancing." + }, + "host_ip": { + "type": "string", + "description": "The host IP to bind to." + }, + "target": { + "type": ["integer", "string"], + "description": "The port inside the container." + }, + "published": { + "type": ["string", "integer"], + "description": "The publicly exposed port." + }, + "protocol": { + "type": "string", + "description": "The port protocol (tcp or udp)." 
+ }, + "app_protocol": { + "type": "string", + "description": "Application protocol to use with the port (e.g., http, https, mysql)." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + "uniqueItems": true + }, + "post_start": { + "type": "array", + "items": {"$ref": "#/definitions/service_hook"}, + "description": "Commands to run after the container starts. If any command fails, the container stops." + }, + "pre_stop": { + "type": "array", + "items": {"$ref": "#/definitions/service_hook"}, + "description": "Commands to run before the container stops. If any command fails, the container stop is aborted." + }, + "privileged": { + "type": ["boolean", "string"], + "description": "Give extended privileges to the service container." + }, + "profiles": { + "$ref": "#/definitions/list_of_strings", + "description": "List of profiles for this service. When profiles are specified, services are only started when the profile is activated." + }, + "pull_policy": { + "type": "string", + "pattern": "always|never|build|if_not_present|missing|refresh|daily|weekly|every_([0-9]+[wdhms])+", + "description": "Policy for pulling images. Options include: 'always', 'never', 'if_not_present', 'missing', 'build', or time-based refresh policies." + }, + "pull_refresh_after": { + "type": "string", + "description": "Time after which to refresh the image. Used with pull_policy=refresh." + }, + "read_only": { + "type": ["boolean", "string"], + "description": "Mount the container's filesystem as read only." + }, + "restart": { + "type": "string", + "description": "Restart policy for the service container. Options include: 'no', 'always', 'on-failure', and 'unless-stopped'." + }, + "runtime": { + "type": "string", + "description": "Runtime to use for this container, e.g., 'runc'." + }, + "scale": { + "type": ["integer", "string"], + "description": "Number of containers to deploy for this service." 
+ }, + "security_opt": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true, + "description": "Override the default labeling scheme for each container." + }, + "shm_size": { + "type": ["number", "string"], + "description": "Size of /dev/shm. A string value can use suffix like '2g' for 2 gigabytes." + }, + "secrets": { + "$ref": "#/definitions/service_config_or_secret", + "description": "Grant access to Secrets on a per-service basis." + }, + "sysctls": { + "$ref": "#/definitions/list_or_dict", + "description": "Kernel parameters to set in the container. You can use either an array or a list." + }, + "stdin_open": { + "type": ["boolean", "string"], + "description": "Keep STDIN open even if not attached." + }, + "stop_grace_period": { + "type": "string", + "description": "Time to wait for the container to stop gracefully before sending SIGKILL (e.g., '1s', '1m30s')." + }, + "stop_signal": { + "type": "string", + "description": "Signal to stop the container (e.g., 'SIGTERM', 'SIGINT')." + }, + "storage_opt": { + "type": "object", + "description": "Storage driver options for the container." + }, + "tmpfs": { + "$ref": "#/definitions/string_or_list", + "description": "Mount a temporary filesystem (tmpfs) into the container. Can be a single value or a list." + }, + "tty": { + "type": ["boolean", "string"], + "description": "Allocate a pseudo-TTY to service container." + }, + "ulimits": { + "$ref": "#/definitions/ulimits", + "description": "Override the default ulimits for a container." + }, + "use_api_socket": { + "type": "boolean", + "description": "Bind mount Docker API socket and required auth." + }, + "user": { + "type": "string", + "description": "Username or UID to run the container process as." + }, + "uts": { + "type": "string", + "description": "UTS namespace to use. 'host' shares the host's UTS namespace." + }, + "userns_mode": { + "type": "string", + "description": "User namespace to use. 'host' shares the host's user namespace." 
+ }, + "volumes": { + "type": "array", + "description": "Mount host paths or named volumes accessible to the container. Short syntax (VOLUME:CONTAINER_PATH[:MODE])", + "items": { + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "required": ["type"], + "properties": { + "type": { + "type": "string", + "enum": ["bind", "volume", "tmpfs", "cluster", "npipe", "image"], + "description": "The mount type: bind for mounting host directories, volume for named volumes, tmpfs for temporary filesystems, cluster for cluster volumes, npipe for named pipes, or image for mounting from an image." + }, + "source": { + "type": "string", + "description": "The source of the mount, a path on the host for a bind mount, a docker image reference for an image mount, or the name of a volume defined in the top-level volumes key. Not applicable for a tmpfs mount." + }, + "target": { + "type": "string", + "description": "The path in the container where the volume is mounted." + }, + "read_only": { + "type": ["boolean", "string"], + "description": "Flag to set the volume as read-only." + }, + "consistency": { + "type": "string", + "description": "The consistency requirements for the mount. Available values are platform specific." + }, + "bind": { + "type": "object", + "description": "Configuration specific to bind mounts.", + "properties": { + "propagation": { + "type": "string", + "description": "The propagation mode for the bind mount: 'shared', 'slave', 'private', 'rshared', 'rslave', or 'rprivate'." + }, + "create_host_path": { + "type": ["boolean", "string"], + "description": "Create the host path if it doesn't exist." + }, + "recursive": { + "type": "string", + "enum": ["enabled", "disabled", "writable", "readonly"], + "description": "Recursively mount the source directory." + }, + "selinux": { + "type": "string", + "enum": ["z", "Z"], + "description": "SELinux relabeling options: 'z' for shared content, 'Z' for private unshared content." 
+ } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "volume": { + "type": "object", + "description": "Configuration specific to volume mounts.", + "properties": { + "labels": { + "$ref": "#/definitions/list_or_dict", + "description": "Labels to apply to the volume." + }, + "nocopy": { + "type": ["boolean", "string"], + "description": "Flag to disable copying of data from a container when a volume is created." + }, + "subpath": { + "type": "string", + "description": "Path within the volume to mount instead of the volume root." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "tmpfs": { + "type": "object", + "description": "Configuration specific to tmpfs mounts.", + "properties": { + "size": { + "oneOf": [ + {"type": "integer", "minimum": 0}, + {"type": "string"} + ], + "description": "Size of the tmpfs mount in bytes." + }, + "mode": { + "type": ["number", "string"], + "description": "File mode of the tmpfs in octal." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "image": { + "type": "object", + "description": "Configuration specific to image mounts.", + "properties": { + "subpath": { + "type": "string", + "description": "Path within the image to mount instead of the image root." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + "uniqueItems": true + }, + "volumes_from": { + "type": "array", + "items": {"type": "string"}, + "uniqueItems": true, + "description": "Mount volumes from another service or container. Optionally specify read-only access (ro) or read-write (rw)." 
+ }, + "working_dir": { + "type": "string", + "description": "The working directory in which the entrypoint or command will be run" + } + }, + "patternProperties": {"^x-": {}}, + "additionalProperties": false + }, + + "healthcheck": { + "type": "object", + "description": "Configuration options to determine whether the container is healthy.", + "properties": { + "disable": { + "type": ["boolean", "string"], + "description": "Disable any container-specified healthcheck. Set to true to disable." + }, + "interval": { + "type": "string", + "description": "Time between running the check (e.g., '1s', '1m30s'). Default: 30s." + }, + "retries": { + "type": ["number", "string"], + "description": "Number of consecutive failures needed to consider the container as unhealthy. Default: 3." + }, + "test": { + "oneOf": [ + {"type": "string"}, + {"type": "array", "items": {"type": "string"}} + ], + "description": "The test to perform to check container health. Can be a string or a list. The first item is either NONE, CMD, or CMD-SHELL. If it's CMD, the rest of the command is exec'd. If it's CMD-SHELL, the rest is run in the shell." + }, + "timeout": { + "type": "string", + "description": "Maximum time to allow one check to run (e.g., '1s', '1m30s'). Default: 30s." + }, + "start_period": { + "type": "string", + "description": "Start period for the container to initialize before starting health-retries countdown (e.g., '1s', '1m30s'). Default: 0s." + }, + "start_interval": { + "type": "string", + "description": "Time between running the check during the start period (e.g., '1s', '1m30s'). Default: interval value." 
+ } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "development": { + "type": ["object", "null"], + "description": "Development configuration for the service, used for development workflows.", + "properties": { + "watch": { + "type": "array", + "description": "Configure watch mode for the service, which monitors file changes and performs actions in response.", + "items": { + "type": "object", + "required": ["path", "action"], + "properties": { + "ignore": { + "$ref": "#/definitions/string_or_list", + "description": "Patterns to exclude from watching." + }, + "include": { + "$ref": "#/definitions/string_or_list", + "description": "Patterns to include in watching." + }, + "path": { + "type": "string", + "description": "Path to watch for changes." + }, + "action": { + "type": "string", + "enum": ["rebuild", "sync", "restart", "sync+restart", "sync+exec"], + "description": "Action to take when a change is detected: rebuild the container, sync files, restart the container, sync and restart, or sync and execute a command." + }, + "target": { + "type": "string", + "description": "Target path in the container for sync operations." + }, + "exec": { + "$ref": "#/definitions/service_hook", + "description": "Command to execute when a change is detected and action is sync+exec." + }, + "initial_sync": { + "type": "boolean", + "description": "Ensure that an initial synchronization is done before starting watch mode for sync+x triggers" + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "deployment": { + "type": ["object", "null"], + "description": "Deployment configuration for the service.", + "properties": { + "mode": { + "type": "string", + "description": "Deployment mode for the service: 'replicated' (default) or 'global'." 
+ }, + "endpoint_mode": { + "type": "string", + "description": "Endpoint mode for the service: 'vip' (default) or 'dnsrr'." + }, + "replicas": { + "type": ["integer", "string"], + "description": "Number of replicas of the service container to run." + }, + "labels": { + "$ref": "#/definitions/list_or_dict", + "description": "Labels to apply to the service." + }, + "rollback_config": { + "type": "object", + "description": "Configuration for rolling back a service update.", + "properties": { + "parallelism": { + "type": ["integer", "string"], + "description": "The number of containers to rollback at a time. If set to 0, all containers rollback simultaneously." + }, + "delay": { + "type": "string", + "description": "The time to wait between each container group's rollback (e.g., '1s', '1m30s')." + }, + "failure_action": { + "type": "string", + "description": "Action to take if a rollback fails: 'continue', 'pause'." + }, + "monitor": { + "type": "string", + "description": "Duration to monitor each task for failures after it is created (e.g., '1s', '1m30s')." + }, + "max_failure_ratio": { + "type": ["number", "string"], + "description": "Failure rate to tolerate during a rollback." + }, + "order": { + "type": "string", + "enum": ["start-first", "stop-first"], + "description": "Order of operations during rollbacks: 'stop-first' (default) or 'start-first'." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "update_config": { + "type": "object", + "description": "Configuration for updating a service.", + "properties": { + "parallelism": { + "type": ["integer", "string"], + "description": "The number of containers to update at a time." + }, + "delay": { + "type": "string", + "description": "The time to wait between updating a group of containers (e.g., '1s', '1m30s')." + }, + "failure_action": { + "type": "string", + "description": "Action to take if an update fails: 'continue', 'pause', 'rollback'." 
+ }, + "monitor": { + "type": "string", + "description": "Duration to monitor each updated task for failures after it is created (e.g., '1s', '1m30s')." + }, + "max_failure_ratio": { + "type": ["number", "string"], + "description": "Failure rate to tolerate during an update (0 to 1)." + }, + "order": { + "type": "string", + "enum": ["start-first", "stop-first"], + "description": "Order of operations during updates: 'stop-first' (default) or 'start-first'." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "resources": { + "type": "object", + "description": "Resource constraints and reservations for the service.", + "properties": { + "limits": { + "type": "object", + "description": "Resource limits for the service containers.", + "properties": { + "cpus": { + "type": ["number", "string"], + "description": "Limit for how much of the available CPU resources, as number of cores, a container can use." + }, + "memory": { + "type": "string", + "description": "Limit on the amount of memory a container can allocate (e.g., '1g', '1024m')." + }, + "pids": { + "type": ["integer", "string"], + "description": "Maximum number of PIDs available to the container." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "reservations": { + "type": "object", + "description": "Resource reservations for the service containers.", + "properties": { + "cpus": { + "type": ["number", "string"], + "description": "Reservation for how much of the available CPU resources, as number of cores, a container can use." + }, + "memory": { + "type": "string", + "description": "Reservation on the amount of memory a container can allocate (e.g., '1g', '1024m')." + }, + "generic_resources": { + "$ref": "#/definitions/generic_resources", + "description": "User-defined resources to reserve." + }, + "devices": { + "$ref": "#/definitions/devices", + "description": "Device reservations for the container." 
+ } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "restart_policy": { + "type": "object", + "description": "Restart policy for the service containers.", + "properties": { + "condition": { + "type": "string", + "description": "Condition for restarting the container: 'none', 'on-failure', 'any'." + }, + "delay": { + "type": "string", + "description": "Delay between restart attempts (e.g., '1s', '1m30s')." + }, + "max_attempts": { + "type": ["integer", "string"], + "description": "Maximum number of restart attempts before giving up." + }, + "window": { + "type": "string", + "description": "Time window used to evaluate the restart policy (e.g., '1s', '1m30s')." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "placement": { + "type": "object", + "description": "Constraints and preferences for the platform to select a physical node to run service containers", + "properties": { + "constraints": { + "type": "array", + "items": {"type": "string"}, + "description": "Placement constraints for the service (e.g., 'node.role==manager')." + }, + "preferences": { + "type": "array", + "description": "Placement preferences for the service.", + "items": { + "type": "object", + "properties": { + "spread": { + "type": "string", + "description": "Spread tasks evenly across values of the specified node label." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "max_replicas_per_node": { + "type": ["integer", "string"], + "description": "Maximum number of replicas of the service." 
+ } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "generic_resources": { + "type": "array", + "description": "User-defined resources for services, allowing services to reserve specialized hardware resources.", + "items": { + "type": "object", + "properties": { + "discrete_resource_spec": { + "type": "object", + "description": "Specification for discrete (countable) resources.", + "properties": { + "kind": { + "type": "string", + "description": "Type of resource (e.g., 'GPU', 'FPGA', 'SSD')." + }, + "value": { + "type": ["number", "string"], + "description": "Number of resources of this kind to reserve." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + + "devices": { + "type": "array", + "description": "Device reservations for containers, allowing services to access specific hardware devices.", + "items": { + "type": "object", + "properties": { + "capabilities": { + "$ref": "#/definitions/list_of_strings", + "description": "List of capabilities the device needs to have (e.g., 'gpu', 'compute', 'utility')." + }, + "count": { + "type": ["string", "integer"], + "description": "Number of devices of this type to reserve." + }, + "device_ids": { + "$ref": "#/definitions/list_of_strings", + "description": "List of specific device IDs to reserve." + }, + "driver": { + "type": "string", + "description": "Device driver to use (e.g., 'nvidia')." + }, + "options": { + "$ref": "#/definitions/list_or_dict", + "description": "Driver-specific options for the device." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}}, + "required": [ + "capabilities" + ] + } + }, + + "gpus": { + "oneOf": [ + { + "type": "string", + "enum": ["all"], + "description": "Use all available GPUs." 
+ }, + { + "type": "array", + "description": "List of specific GPU devices to use.", + "items": { + "type": "object", + "properties": { + "capabilities": { + "$ref": "#/definitions/list_of_strings", + "description": "List of capabilities the GPU needs to have (e.g., 'compute', 'utility')." + }, + "count": { + "type": ["string", "integer"], + "description": "Number of GPUs to use." + }, + "device_ids": { + "$ref": "#/definitions/list_of_strings", + "description": "List of specific GPU device IDs to use." + }, + "driver": { + "type": "string", + "description": "GPU driver to use (e.g., 'nvidia')." + }, + "options": { + "$ref": "#/definitions/list_or_dict", + "description": "Driver-specific options for the GPU." + } + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + }, + + "include": { + "description": "Compose application or sub-projects to be included.", + "oneOf": [ + {"type": "string"}, + { + "type": "object", + "properties": { + "path": { + "$ref": "#/definitions/string_or_list", + "description": "Path to the Compose application or sub-project files to include." + }, + "env_file": { + "$ref": "#/definitions/string_or_list", + "description": "Path to the environment files to use to define default values when interpolating variables in the Compose files being parsed." + }, + "project_directory": { + "type": "string", + "description": "Path to resolve relative paths set in the Compose file" + } + }, + "additionalProperties": false + } + ] + }, + + "network": { + "type": ["object", "null"], + "description": "Network configuration for the Compose application.", + "properties": { + "name": { + "type": "string", + "description": "Custom name for this network." + }, + "driver": { + "type": "string", + "description": "Specify which driver should be used for this network. Default is 'bridge'." 
+ }, + "driver_opts": { + "type": "object", + "description": "Specify driver-specific options defined as key/value pairs.", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "ipam": { + "type": "object", + "description": "Custom IP Address Management configuration for this network.", + "properties": { + "driver": { + "type": "string", + "description": "Custom IPAM driver, instead of the default." + }, + "config": { + "type": "array", + "description": "List of IPAM configuration blocks.", + "items": { + "type": "object", + "properties": { + "subnet": { + "type": "string", + "description": "Subnet in CIDR format that represents a network segment." + }, + "ip_range": { + "type": "string", + "description": "Range of IPs from which to allocate container IPs." + }, + "gateway": { + "type": "string", + "description": "IPv4 or IPv6 gateway for the subnet." + }, + "aux_addresses": { + "type": "object", + "description": "Auxiliary IPv4 or IPv6 addresses used by Network driver.", + "additionalProperties": false, + "patternProperties": {"^.+$": {"type": "string"}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + }, + "options": { + "type": "object", + "description": "Driver-specific options for the IPAM driver.", + "additionalProperties": false, + "patternProperties": {"^.+$": {"type": "string"}} + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "external": { + "type": ["boolean", "string", "object"], + "description": "Specifies that this network already exists and was created outside of Compose.", + "properties": { + "name": { + "deprecated": true, + "type": "string", + "description": "Specifies the name of the external network. Deprecated: use the 'name' property instead." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "internal": { + "type": ["boolean", "string"], + "description": "Create an externally isolated network." 
+ }, + "enable_ipv4": { + "type": ["boolean", "string"], + "description": "Enable IPv4 networking." + }, + "enable_ipv6": { + "type": ["boolean", "string"], + "description": "Enable IPv6 networking." + }, + "attachable": { + "type": ["boolean", "string"], + "description": "If true, standalone containers can attach to this network." + }, + "labels": { + "$ref": "#/definitions/list_or_dict", + "description": "Add metadata to the network using labels." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "volume": { + "type": ["object", "null"], + "description": "Volume configuration for the Compose application.", + "properties": { + "name": { + "type": "string", + "description": "Custom name for this volume." + }, + "driver": { + "type": "string", + "description": "Specify which volume driver should be used for this volume." + }, + "driver_opts": { + "type": "object", + "description": "Specify driver-specific options.", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "external": { + "type": ["boolean", "string", "object"], + "description": "Specifies that this volume already exists and was created outside of Compose.", + "properties": { + "name": { + "deprecated": true, + "type": "string", + "description": "Specifies the name of the external volume. Deprecated: use the 'name' property instead." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + "labels": { + "$ref": "#/definitions/list_or_dict", + "description": "Add metadata to the volume using labels." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "secret": { + "type": "object", + "description": "Secret configuration for the Compose application.", + "properties": { + "name": { + "type": "string", + "description": "Custom name for this secret." + }, + "environment": { + "type": "string", + "description": "Name of an environment variable from which to get the secret value." 
+ }, + "file": { + "type": "string", + "description": "Path to a file containing the secret value." + }, + "external": { + "type": ["boolean", "string", "object"], + "description": "Specifies that this secret already exists and was created outside of Compose.", + "properties": { + "name": { + "type": "string", + "description": "Specifies the name of the external secret." + } + } + }, + "labels": { + "$ref": "#/definitions/list_or_dict", + "description": "Add metadata to the secret using labels." + }, + "driver": { + "type": "string", + "description": "Specify which secret driver should be used for this secret." + }, + "driver_opts": { + "type": "object", + "description": "Specify driver-specific options.", + "patternProperties": { + "^.+$": {"type": ["string", "number"]} + } + }, + "template_driver": { + "type": "string", + "description": "Driver to use for templating the secret's value." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "config": { + "type": "object", + "description": "Config configuration for the Compose application.", + "properties": { + "name": { + "type": "string", + "description": "Custom name for this config." + }, + "content": { + "type": "string", + "description": "Inline content of the config." + }, + "environment": { + "type": "string", + "description": "Name of an environment variable from which to get the config value." + }, + "file": { + "type": "string", + "description": "Path to a file containing the config value." + }, + "external": { + "type": ["boolean", "string", "object"], + "description": "Specifies that this config already exists and was created outside of Compose.", + "properties": { + "name": { + "deprecated": true, + "type": "string", + "description": "Specifies the name of the external config. Deprecated: use the 'name' property instead." + } + } + }, + "labels": { + "$ref": "#/definitions/list_or_dict", + "description": "Add metadata to the config using labels." 
+ }, + "template_driver": { + "type": "string", + "description": "Driver to use for templating the config's value." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "model": { + "type": "object", + "description": "Language Model for the Compose application.", + "properties": { + "name": { + "type": "string", + "description": "Custom name for this model." + }, + "model": { + "type": "string", + "description": "Language Model to run." + }, + "context_size": { + "type": "integer" + }, + "runtime_flags": { + "type": "array", + "items": {"type": "string"}, + "description": "Raw runtime flags to pass to the inference engine." + } + }, + "required": ["model"], + "additionalProperties": false, + "patternProperties": {"^x-": {}} + }, + + "command": { + "oneOf": [ + { + "type": "null", + "description": "No command specified, use the container's default command." + }, + { + "type": "string", + "description": "Command as a string, which will be executed in a shell (e.g., '/bin/sh -c')." + }, + { + "type": "array", + "description": "Command as an array of strings, which will be executed directly without a shell.", + "items": { + "type": "string", + "description": "Part of the command (executable or argument)." + } + } + ], + "description": "Command to run in the container, which can be specified as a string (shell form) or array (exec form)." + }, + + "service_hook": { + "type": "object", + "description": "Configuration for service lifecycle hooks, which are commands executed at specific points in a container's lifecycle.", + "properties": { + "command": { + "$ref": "#/definitions/command", + "description": "Command to execute as part of the hook." + }, + "user": { + "type": "string", + "description": "User to run the command as." + }, + "privileged": { + "type": ["boolean", "string"], + "description": "Whether to run the command with extended privileges." 
+ }, + "working_dir": { + "type": "string", + "description": "Working directory for the command." + }, + "environment": { + "$ref": "#/definitions/list_or_dict", + "description": "Environment variables for the command." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}}, + "required": ["command"] + }, + + "env_file": { + "oneOf": [ + { + "type": "string", + "description": "Path to a file containing environment variables." + }, + { + "type": "array", + "description": "List of paths to files containing environment variables.", + "items": { + "oneOf": [ + { + "type": "string", + "description": "Path to a file containing environment variables." + }, + { + "type": "object", + "description": "Detailed configuration for an environment file.", + "additionalProperties": false, + "properties": { + "path": { + "type": "string", + "description": "Path to the environment file." + }, + "format": { + "type": "string", + "description": "Format attribute lets you to use an alternative file formats for env_file. When not set, env_file is parsed according to Compose rules." + }, + "required": { + "type": ["boolean", "string"], + "default": true, + "description": "Whether the file is required. If true and the file doesn't exist, an error will be raised." + } + }, + "required": [ + "path" + ] + } + ] + } + } + ] + }, + + "label_file": { + "oneOf": [ + { + "type": "string", + "description": "Path to a file containing Docker labels." + }, + { + "type": "array", + "description": "List of paths to files containing Docker labels.", + "items": { + "type": "string", + "description": "Path to a file containing Docker labels." + } + } + ] + }, + + "string_or_list": { + "oneOf": [ + { + "type": "string", + "description": "A single string value." + }, + { + "$ref": "#/definitions/list_of_strings", + "description": "A list of string values." + } + ], + "description": "Either a single string or a list of strings." 
+ }, + + "list_of_strings": { + "type": "array", + "description": "A list of unique string values.", + "items": { + "type": "string", + "description": "A string value in the list." + }, + "uniqueItems": true + }, + + "list_or_dict": { + "oneOf": [ + { + "type": "object", + "description": "A dictionary mapping keys to values.", + "patternProperties": { + ".+": { + "type": ["string", "number", "boolean", "null"], + "description": "Value for the key, which can be a string, number, boolean, or null." + } + }, + "additionalProperties": false + }, + { + "type": "array", + "description": "A list of unique string values.", + "items": { + "type": "string", + "description": "A string value in the list." + }, + "uniqueItems": true + } + ], + "description": "Either a dictionary mapping keys to values, or a list of strings." + }, + + "extra_hosts": { + "oneOf": [ + { + "type": "object", + "description": "list mapping hostnames to IP addresses.", + "patternProperties": { + ".+": { + "oneOf": [ + { + "type": "string", + "description": "IP address for the hostname." + }, + { + "type": "array", + "description": "List of IP addresses for the hostname.", + "items": { + "type": "string", + "description": "IP address for the hostname." + }, + "uniqueItems": false + } + ] + } + }, + "additionalProperties": false + }, + { + "type": "array", + "description": "List of host:IP mappings in the format 'hostname:IP'.", + "items": { + "type": "string", + "description": "Host:IP mapping in the format 'hostname:IP'." + }, + "uniqueItems": true + } + ], + "description": "Additional hostnames to be defined in the container's /etc/hosts file." + }, + + "blkio_limit": { + "type": "object", + "description": "Block IO limit for a specific device.", + "properties": { + "path": { + "type": "string", + "description": "Path to the device (e.g., '/dev/sda')." + }, + "rate": { + "type": ["integer", "string"], + "description": "Rate limit in bytes per second or IO operations per second." 
+ } + }, + "additionalProperties": false + }, + "blkio_weight": { + "type": "object", + "description": "Block IO weight for a specific device.", + "properties": { + "path": { + "type": "string", + "description": "Path to the device (e.g., '/dev/sda')." + }, + "weight": { + "type": ["integer", "string"], + "description": "Relative weight for the device, between 10 and 1000." + } + }, + "additionalProperties": false + }, + "service_config_or_secret": { + "type": "array", + "description": "Configuration for service configs or secrets, defining how they are mounted in the container.", + "items": { + "oneOf": [ + { + "type": "string", + "description": "Name of the config or secret to grant access to." + }, + { + "type": "object", + "description": "Detailed configuration for a config or secret.", + "properties": { + "source": { + "type": "string", + "description": "Name of the config or secret as defined in the top-level configs or secrets section." + }, + "target": { + "type": "string", + "description": "Path in the container where the config or secret will be mounted. Defaults to / for configs and /run/secrets/ for secrets." + }, + "uid": { + "type": "string", + "description": "UID of the file in the container. Default is 0 (root)." + }, + "gid": { + "type": "string", + "description": "GID of the file in the container. Default is 0 (root)." + }, + "mode": { + "type": ["number", "string"], + "description": "File permission mode inside the container, in octal. Default is 0444 for configs and 0400 for secrets." + } + }, + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + } + }, + "ulimits": { + "type": "object", + "description": "Container ulimit options, controlling resource limits for processes inside the container.", + "patternProperties": { + "^[a-z]+$": { + "oneOf": [ + { + "type": ["integer", "string"], + "description": "Single value for both soft and hard limits." 
+ }, + { + "type": "object", + "description": "Separate soft and hard limits.", + "properties": { + "hard": { + "type": ["integer", "string"], + "description": "Hard limit for the ulimit type. This is the maximum allowed value." + }, + "soft": { + "type": ["integer", "string"], + "description": "Soft limit for the ulimit type. This is the value that's actually enforced." + } + }, + "required": ["soft", "hard"], + "additionalProperties": false, + "patternProperties": {"^x-": {}} + } + ] + } + } + } + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go b/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go new file mode 100644 index 00000000..a73eda24 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/schema/schema.go @@ -0,0 +1,149 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package schema + +import ( + // Enable support for embedded static resources + _ "embed" + "encoding/json" + "errors" + "fmt" + "slices" + "strings" + "time" + + "github.com/santhosh-tekuri/jsonschema/v6" + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/language" + "golang.org/x/text/message" +) + +func durationFormatChecker(input any) error { + value, ok := input.(string) + if !ok { + return fmt.Errorf("expected string") + } + _, err := time.ParseDuration(value) + return err +} + +// Schema is the compose-spec JSON schema +// +//go:embed compose-spec.json +var Schema string + +// Validate uses the jsonschema to validate the configuration +func Validate(config map[string]interface{}) error { + compiler := jsonschema.NewCompiler() + shema, err := jsonschema.UnmarshalJSON(strings.NewReader(Schema)) + if err != nil { + return err + } + err = compiler.AddResource("compose-spec.json", shema) + if err != nil { + return err + } + compiler.RegisterFormat(&jsonschema.Format{ + Name: "duration", + Validate: durationFormatChecker, + }) + schema := compiler.MustCompile("compose-spec.json") + + // santhosh-tekuri doesn't allow derived types + // see https://github.com/santhosh-tekuri/jsonschema/pull/240 + marshaled, err := json.Marshal(config) + if err != nil { + return err + } + + var raw map[string]interface{} + err = json.Unmarshal(marshaled, &raw) + if err != nil { + return err + } + + err = schema.Validate(raw) + var verr *jsonschema.ValidationError + if ok := errors.As(err, &verr); ok { + return validationError{getMostSpecificError(verr)} + } + return err +} + +type validationError struct { + err *jsonschema.ValidationError +} + +func (e validationError) Error() string { + path := strings.Join(e.err.InstanceLocation, ".") + p := message.NewPrinter(language.English) + switch k := e.err.ErrorKind.(type) { + case *kind.Type: + return fmt.Sprintf("%s must be a %s", path, humanReadableType(k.Want...)) + case *kind.Minimum: + return fmt.Sprintf("%s 
must be greater than or equal to %s", path, k.Want.Num()) + case *kind.Maximum: + return fmt.Sprintf("%s must be less than or equal to %s", path, k.Want.Num()) + } + return fmt.Sprintf("%s %s", path, e.err.ErrorKind.LocalizedString(p)) +} + +func humanReadableType(want ...string) string { + if len(want) == 1 { + switch want[0] { + case "object": + return "mapping" + default: + return want[0] + } + } + + for i, s := range want { + want[i] = humanReadableType(s) + } + + slices.Sort(want) + return fmt.Sprintf( + "%s or %s", + strings.Join(want[0:len(want)-1], ", "), + want[len(want)-1], + ) +} + +func getMostSpecificError(err *jsonschema.ValidationError) *jsonschema.ValidationError { + var mostSpecificError *jsonschema.ValidationError + if len(err.Causes) == 0 { + return err + } + for _, cause := range err.Causes { + cause = getMostSpecificError(cause) + if specificity(cause) > specificity(mostSpecificError) { + mostSpecificError = cause + } + } + return mostSpecificError +} + +func specificity(err *jsonschema.ValidationError) int { + if err == nil { + return -1 + } + if _, ok := err.ErrorKind.(*kind.AdditionalProperties); ok { + return len(err.InstanceLocation) + 1 + } + return len(err.InstanceLocation) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/schema/using-variables.yaml b/vendor/github.com/compose-spec/compose-go/v2/schema/using-variables.yaml new file mode 100644 index 00000000..3f302cd6 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/schema/using-variables.yaml @@ -0,0 +1,123 @@ +name: ${VARIABLE} +services: + foo: + deploy: + mode: ${VARIABLE} + replicas: ${VARIABLE} + rollback_config: + parallelism: ${VARIABLE} + delay: ${VARIABLE} + failure_action: ${VARIABLE} + monitor: ${VARIABLE} + max_failure_ratio: ${VARIABLE} + update_config: + parallelism: ${VARIABLE} + delay: ${VARIABLE} + failure_action: ${VARIABLE} + monitor: ${VARIABLE} + max_failure_ratio: ${VARIABLE} + resources: + limits: + memory: ${VARIABLE} + reservations: + 
memory: ${VARIABLE} + generic_resources: + - discrete_resource_spec: + kind: ${VARIABLE} + value: ${VARIABLE} + - discrete_resource_spec: + kind: ${VARIABLE} + value: ${VARIABLE} + restart_policy: + condition: ${VARIABLE} + delay: ${VARIABLE} + max_attempts: ${VARIABLE} + window: ${VARIABLE} + placement: + max_replicas_per_node: ${VARIABLE} + preferences: + - spread: ${VARIABLE} + endpoint_mode: ${VARIABLE} + expose: + - ${VARIABLE} + external_links: + - ${VARIABLE} + extra_hosts: + - ${VARIABLE} + hostname: ${VARIABLE} + + healthcheck: + test: ${VARIABLE} + interval: ${VARIABLE} + timeout: ${VARIABLE} + retries: ${VARIABLE} + start_period: ${VARIABLE} + start_interval: ${VARIABLE} + image: ${VARIABLE} + mac_address: ${VARIABLE} + networks: + some-network: + aliases: + - ${VARIABLE} + other-network: + ipv4_address: ${VARIABLE} + ipv6_address: ${VARIABLE} + mac_address: ${VARIABLE} + ports: + - ${VARIABLE} + privileged: ${VARIABLE} + read_only: ${VARIABLE} + restart: ${VARIABLE} + secrets: + - source: ${VARIABLE} + target: ${VARIABLE} + uid: ${VARIABLE} + gid: ${VARIABLE} + mode: ${VARIABLE} + stdin_open: ${VARIABLE} + stop_grace_period: ${VARIABLE} + stop_signal: ${VARIABLE} + storage_opt: + size: ${VARIABLE} + sysctls: + net.core.somaxconn: ${VARIABLE} + tmpfs: + - ${VARIABLE} + tty: ${VARIABLE} + ulimits: + nproc: ${VARIABLE} + nofile: + soft: ${VARIABLE} + hard: ${VARIABLE} + user: ${VARIABLE} + volumes: + - ${VARIABLE}:${VARIABLE} + - type: tmpfs + target: ${VARIABLE} + tmpfs: + size: ${VARIABLE} + +networks: + network: + ipam: + driver: ${VARIABLE} + config: + - subnet: ${VARIABLE} + ip_range: ${VARIABLE} + gateway: ${VARIABLE} + aux_addresses: + host1: ${VARIABLE} + external-network: + external: ${VARIABLE} + +volumes: + external-volume: + external: ${VARIABLE} + +configs: + config1: + external: ${VARIABLE} + +secrets: + secret1: + external: ${VARIABLE} diff --git a/vendor/github.com/compose-spec/compose-go/v2/template/template.go 
b/vendor/github.com/compose-spec/compose-go/v2/template/template.go new file mode 100644 index 00000000..beb61ed8 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/template/template.go @@ -0,0 +1,380 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package template + +import ( + "errors" + "fmt" + "regexp" + "sort" + "strings" + + "github.com/sirupsen/logrus" +) + +const ( + delimiter = "\\$" + substitutionNamed = "[_a-z][_a-z0-9]*" + substitutionBraced = "[_a-z][_a-z0-9]*(?::?[-+?](.*))?" 
+ groupEscaped = "escaped" + groupNamed = "named" + groupBraced = "braced" + groupInvalid = "invalid" +) + +var ( + patternString = fmt.Sprintf( + "%s(?i:(?P<%s>%s)|(?P<%s>%s)|{(?:(?P<%s>%s)}|(?P<%s>)))", + delimiter, + groupEscaped, delimiter, + groupNamed, substitutionNamed, + groupBraced, substitutionBraced, + groupInvalid, + ) + + DefaultPattern = regexp.MustCompile(patternString) +) + +// InvalidTemplateError is returned when a variable template is not in a valid +// format +type InvalidTemplateError struct { + Template string +} + +func (e InvalidTemplateError) Error() string { + return fmt.Sprintf("Invalid template: %#v", e.Template) +} + +// MissingRequiredError is returned when a variable template is missing +type MissingRequiredError struct { + Variable string + Reason string +} + +func (e MissingRequiredError) Error() string { + if e.Reason != "" { + return fmt.Sprintf("required variable %s is missing a value: %s", e.Variable, e.Reason) + } + return fmt.Sprintf("required variable %s is missing a value", e.Variable) +} + +// Mapping is a user-supplied function which maps from variable names to values. +// Returns the value as a string and a bool indicating whether +// the value is present, to distinguish between an empty string +// and the absence of a value. +type Mapping func(string) (string, bool) + +// SubstituteFunc is a user-supplied function that apply substitution. +// Returns the value as a string, a bool indicating if the function could apply +// the substitution and an error. +type SubstituteFunc func(string, Mapping) (string, bool, error) + +// ReplacementFunc is a user-supplied function that is apply to the matching +// substring. Returns the value as a string and an error. 
+type ReplacementFunc func(string, Mapping, *Config) (string, error) + +type Config struct { + pattern *regexp.Regexp + substituteFunc SubstituteFunc + replacementFunc ReplacementFunc + logging bool +} + +type Option func(*Config) + +func WithPattern(pattern *regexp.Regexp) Option { + return func(cfg *Config) { + cfg.pattern = pattern + } +} + +func WithSubstitutionFunction(subsFunc SubstituteFunc) Option { + return func(cfg *Config) { + cfg.substituteFunc = subsFunc + } +} + +func WithReplacementFunction(replacementFunc ReplacementFunc) Option { + return func(cfg *Config) { + cfg.replacementFunc = replacementFunc + } +} + +func WithoutLogging(cfg *Config) { + cfg.logging = false +} + +// SubstituteWithOptions substitute variables in the string with their values. +// It accepts additional options such as a custom function or pattern. +func SubstituteWithOptions(template string, mapping Mapping, options ...Option) (string, error) { + var returnErr error + + cfg := &Config{ + pattern: DefaultPattern, + replacementFunc: DefaultReplacementFunc, + logging: true, + } + for _, o := range options { + o(cfg) + } + + result := cfg.pattern.ReplaceAllStringFunc(template, func(substring string) string { + replacement, err := cfg.replacementFunc(substring, mapping, cfg) + if err != nil { + // Add the template for template errors + var tmplErr *InvalidTemplateError + if errors.As(err, &tmplErr) { + if tmplErr.Template == "" { + tmplErr.Template = template + } + } + // Save the first error to be returned + if returnErr == nil { + returnErr = err + } + + } + return replacement + }) + + return result, returnErr +} + +func DefaultReplacementFunc(substring string, mapping Mapping, cfg *Config) (string, error) { + value, _, err := DefaultReplacementAppliedFunc(substring, mapping, cfg) + return value, err +} + +func DefaultReplacementAppliedFunc(substring string, mapping Mapping, cfg *Config) (string, bool, error) { + pattern := cfg.pattern + subsFunc := cfg.substituteFunc + if subsFunc 
== nil { + _, subsFunc = getSubstitutionFunctionForTemplate(substring) + } + + closingBraceIndex := getFirstBraceClosingIndex(substring) + rest := "" + if closingBraceIndex > -1 { + rest = substring[closingBraceIndex+1:] + substring = substring[0 : closingBraceIndex+1] + } + + matches := pattern.FindStringSubmatch(substring) + groups := matchGroups(matches, pattern) + if escaped := groups[groupEscaped]; escaped != "" { + return escaped, true, nil + } + + braced := false + substitution := groups[groupNamed] + if substitution == "" { + substitution = groups[groupBraced] + braced = true + } + + if substitution == "" { + return "", false, &InvalidTemplateError{} + } + + if braced { + value, applied, err := subsFunc(substitution, mapping) + if err != nil { + return "", false, err + } + if applied { + interpolatedNested, err := SubstituteWith(rest, mapping, pattern) + if err != nil { + return "", false, err + } + return value + interpolatedNested, true, nil + } + } + + value, ok := mapping(substitution) + if !ok && cfg.logging { + logrus.Warnf("The %q variable is not set. Defaulting to a blank string.", substitution) + } + + return value, ok, nil +} + +// SubstituteWith substitute variables in the string with their values. +// It accepts additional substitute function. +func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) { + options := []Option{ + WithPattern(pattern), + } + if len(subsFuncs) > 0 { + options = append(options, WithSubstitutionFunction(subsFuncs[0])) + } + + return SubstituteWithOptions(template, mapping, options...) 
+} + +func getSubstitutionFunctionForTemplate(template string) (string, SubstituteFunc) { + interpolationMapping := []struct { + string + SubstituteFunc + }{ + {":?", requiredErrorWhenEmptyOrUnset}, + {"?", requiredErrorWhenUnset}, + {":-", defaultWhenEmptyOrUnset}, + {"-", defaultWhenUnset}, + {":+", defaultWhenNotEmpty}, + {"+", defaultWhenSet}, + } + sort.Slice(interpolationMapping, func(i, j int) bool { + idxI := strings.Index(template, interpolationMapping[i].string) + idxJ := strings.Index(template, interpolationMapping[j].string) + if idxI < 0 { + return false + } + if idxJ < 0 { + return true + } + return idxI < idxJ + }) + + return interpolationMapping[0].string, interpolationMapping[0].SubstituteFunc +} + +func getFirstBraceClosingIndex(s string) int { + openVariableBraces := 0 + for i := 0; i < len(s); i++ { + if s[i] == '}' { + openVariableBraces-- + if openVariableBraces == 0 { + return i + } + } + if s[i] == '{' { + openVariableBraces++ + i++ + } + } + return -1 +} + +// Substitute variables in the string with their values +func Substitute(template string, mapping Mapping) (string, error) { + return SubstituteWith(template, mapping, DefaultPattern) +} + +// Soft default (fall back if unset or empty) +func defaultWhenEmptyOrUnset(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenAbsence(substitution, mapping, true) +} + +// Hard default (fall back if-and-only-if empty) +func defaultWhenUnset(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenAbsence(substitution, mapping, false) +} + +func defaultWhenNotEmpty(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenPresence(substitution, mapping, true) +} + +func defaultWhenSet(substitution string, mapping Mapping) (string, bool, error) { + return withDefaultWhenPresence(substitution, mapping, false) +} + +func requiredErrorWhenEmptyOrUnset(substitution string, mapping Mapping) (string, bool, error) { + 
return withRequired(substitution, mapping, ":?", func(v string) bool { return v != "" }) +} + +func requiredErrorWhenUnset(substitution string, mapping Mapping) (string, bool, error) { + return withRequired(substitution, mapping, "?", func(_ string) bool { return true }) +} + +func withDefaultWhenPresence(substitution string, mapping Mapping, notEmpty bool) (string, bool, error) { + sep := "+" + if notEmpty { + sep = ":+" + } + if !strings.Contains(substitution, sep) { + return "", false, nil + } + name, defaultValue := partition(substitution, sep) + value, ok := mapping(name) + if ok && (!notEmpty || (notEmpty && value != "")) { + defaultValue, err := Substitute(defaultValue, mapping) + if err != nil { + return "", false, err + } + return defaultValue, true, nil + } + return value, true, nil +} + +func withDefaultWhenAbsence(substitution string, mapping Mapping, emptyOrUnset bool) (string, bool, error) { + sep := "-" + if emptyOrUnset { + sep = ":-" + } + if !strings.Contains(substitution, sep) { + return "", false, nil + } + name, defaultValue := partition(substitution, sep) + value, ok := mapping(name) + if !ok || (emptyOrUnset && value == "") { + defaultValue, err := Substitute(defaultValue, mapping) + if err != nil { + return "", false, err + } + return defaultValue, true, nil + } + return value, true, nil +} + +func withRequired(substitution string, mapping Mapping, sep string, valid func(string) bool) (string, bool, error) { + if !strings.Contains(substitution, sep) { + return "", false, nil + } + name, errorMessage := partition(substitution, sep) + value, ok := mapping(name) + if !ok || !valid(value) { + errorMessage, err := Substitute(errorMessage, mapping) + if err != nil { + return "", false, err + } + return "", true, &MissingRequiredError{ + Reason: errorMessage, + Variable: name, + } + } + return value, true, nil +} + +func matchGroups(matches []string, pattern *regexp.Regexp) map[string]string { + groups := make(map[string]string) + for i, name := 
range pattern.SubexpNames()[1:] { + groups[name] = matches[i+1] + } + return groups +} + +// Split the string at the first occurrence of sep, and return the part before the separator, +// and the part after the separator. +// +// If the separator is not found, return the string itself, followed by an empty string. +func partition(s, sep string) (string, string) { + if strings.Contains(s, sep) { + parts := strings.SplitN(s, sep, 2) + return parts[0], parts[1] + } + return s, "" +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/template/variables.go b/vendor/github.com/compose-spec/compose-go/v2/template/variables.go new file mode 100644 index 00000000..376137b5 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/template/variables.go @@ -0,0 +1,157 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package template + +import ( + "regexp" + "strings" +) + +type Variable struct { + Name string + DefaultValue string + PresenceValue string + Required bool +} + +// ExtractVariables returns a map of all the variables defined in the specified +// compose file (dict representation) and their default value if any. 
+func ExtractVariables(configDict map[string]interface{}, pattern *regexp.Regexp) map[string]Variable { + if pattern == nil { + pattern = DefaultPattern + } + return recurseExtract(configDict, pattern) +} + +func recurseExtract(value interface{}, pattern *regexp.Regexp) map[string]Variable { + m := map[string]Variable{} + + switch value := value.(type) { + case string: + if values, is := extractVariable(value, pattern); is { + for _, v := range values { + m[v.Name] = v + } + } + case map[string]interface{}: + for _, elem := range value { + submap := recurseExtract(elem, pattern) + for key, value := range submap { + m[key] = value + } + } + + case []interface{}: + for _, elem := range value { + submap := recurseExtract(elem, pattern) + for key, value := range submap { + m[key] = value + } + } + } + + return m +} + +func extractVariable(value interface{}, pattern *regexp.Regexp) ([]Variable, bool) { + sValue, ok := value.(string) + if !ok { + return []Variable{}, false + } + matches := pattern.FindAllStringSubmatch(sValue, -1) + if len(matches) == 0 { + return []Variable{}, false + } + values := []Variable{} + for _, match := range matches { + groups := matchGroups(match, pattern) + if escaped := groups[groupEscaped]; escaped != "" { + continue + } + val := groups[groupNamed] + if val == "" { + val = groups[groupBraced] + s := match[0] + i := getFirstBraceClosingIndex(s) + if i > 0 { + val = s[2:i] + if len(s) > i { + if v, b := extractVariable(s[i+1:], pattern); b { + values = append(values, v...) 
+ } + } + } + } + name := val + var defaultValue string + var presenceValue string + var required bool + i := strings.IndexFunc(val, func(r rune) bool { + if r >= 'a' && r <= 'z' { + return false + } + if r >= 'A' && r <= 'Z' { + return false + } + if r >= '0' && r <= '9' { + return false + } + if r == '_' { + return false + } + return true + }) + + if i > 0 { + name = val[:i] + rest := val[i:] + switch { + case strings.HasPrefix(rest, ":?"): + required = true + case strings.HasPrefix(rest, "?"): + required = true + case strings.HasPrefix(rest, ":-"): + defaultValue = rest[2:] + case strings.HasPrefix(rest, "-"): + defaultValue = rest[1:] + case strings.HasPrefix(rest, ":+"): + presenceValue = rest[2:] + case strings.HasPrefix(rest, "+"): + presenceValue = rest[1:] + } + } + + values = append(values, Variable{ + Name: name, + DefaultValue: defaultValue, + PresenceValue: presenceValue, + Required: required, + }) + + if defaultValue != "" { + if v, b := extractVariable(defaultValue, pattern); b { + values = append(values, v...) + } + } + if presenceValue != "" { + if v, b := extractVariable(presenceValue, pattern); b { + values = append(values, v...) + } + } + } + return values, len(values) > 0 +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/build.go b/vendor/github.com/compose-spec/compose-go/v2/transform/build.go new file mode 100644 index 00000000..90a996cc --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/build.go @@ -0,0 +1,48 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformBuild(data any, p tree.Path, ignoreParseError bool) (any, error) { + switch v := data.(type) { + case map[string]any: + return transformMapping(v, p, ignoreParseError) + case string: + return map[string]any{ + "context": v, + }, nil + default: + return data, fmt.Errorf("%s: invalid type %T for build", p, v) + } +} + +func defaultBuildContext(data any, _ tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case map[string]any: + if _, ok := v["context"]; !ok { + v["context"] = "." + } + return v, nil + default: + return data, nil + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go b/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go new file mode 100644 index 00000000..d0525f02 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/canonical.go @@ -0,0 +1,137 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +// Func is a function that can transform data at a specific path +type Func func(data any, p tree.Path, ignoreParseError bool) (any, error) + +var transformers = map[tree.Path]Func{} + +func init() { + transformers["services.*"] = transformService + transformers["services.*.build.secrets.*"] = transformFileMount + transformers["services.*.build.provenance"] = transformStringOrX + transformers["services.*.build.sbom"] = transformStringOrX + transformers["services.*.build.additional_contexts"] = transformKeyValue + transformers["services.*.depends_on"] = transformDependsOn + transformers["services.*.env_file"] = transformEnvFile + transformers["services.*.label_file"] = transformStringOrList + transformers["services.*.extends"] = transformExtends + transformers["services.*.gpus"] = transformGpus + transformers["services.*.networks"] = transformStringSliceToMap + transformers["services.*.models"] = transformStringSliceToMap + transformers["services.*.volumes.*"] = transformVolumeMount + transformers["services.*.dns"] = transformStringOrList + transformers["services.*.devices.*"] = transformDeviceMapping + transformers["services.*.secrets.*"] = transformFileMount + transformers["services.*.configs.*"] = transformFileMount + transformers["services.*.ports"] = transformPorts + transformers["services.*.build"] = transformBuild + transformers["services.*.build.ssh"] = transformSSH + transformers["services.*.ulimits.*"] = transformUlimits + transformers["services.*.build.ulimits.*"] = transformUlimits + transformers["services.*.develop.watch.*.ignore"] = transformStringOrList + transformers["services.*.develop.watch.*.include"] = transformStringOrList + transformers["volumes.*"] = transformMaybeExternal + transformers["networks.*"] = transformMaybeExternal + transformers["secrets.*"] = transformMaybeExternal + transformers["configs.*"] = transformMaybeExternal + 
transformers["include.*"] = transformInclude +} + +func transformStringOrList(data any, _ tree.Path, _ bool) (any, error) { + switch t := data.(type) { + case string: + return []any{t}, nil + default: + return data, nil + } +} + +// Canonical transforms a compose model into canonical syntax +func Canonical(yaml map[string]any, ignoreParseError bool) (map[string]any, error) { + canonical, err := transform(yaml, tree.NewPath(), ignoreParseError) + if err != nil { + return nil, err + } + return canonical.(map[string]any), nil +} + +func transform(data any, p tree.Path, ignoreParseError bool) (any, error) { + for pattern, transformer := range transformers { + if p.Matches(pattern) { + t, err := transformer(data, p, ignoreParseError) + if err != nil { + return nil, err + } + return t, nil + } + } + switch v := data.(type) { + case map[string]any: + a, err := transformMapping(v, p, ignoreParseError) + if err != nil { + return a, err + } + return v, nil + case []any: + a, err := transformSequence(v, p, ignoreParseError) + if err != nil { + return a, err + } + return v, nil + default: + return data, nil + } +} + +func transformSequence(v []any, p tree.Path, ignoreParseError bool) ([]any, error) { + for i, e := range v { + t, err := transform(e, p.Next("[]"), ignoreParseError) + if err != nil { + return nil, err + } + v[i] = t + } + return v, nil +} + +func transformMapping(v map[string]any, p tree.Path, ignoreParseError bool) (map[string]any, error) { + for k, e := range v { + t, err := transform(e, p.Next(k), ignoreParseError) + if err != nil { + return nil, err + } + v[k] = t + } + return v, nil +} + +func transformStringOrX(data any, _ tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case string: + return v, nil + default: + return fmt.Sprint(v), nil + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go b/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go new file mode 100644 index 00000000..b82da694 --- 
/dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/defaults.go @@ -0,0 +1,97 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "github.com/compose-spec/compose-go/v2/tree" +) + +// DefaultValues contains the default value transformers for compose fields +var DefaultValues = map[tree.Path]Func{} + +func init() { + DefaultValues["services.*.build"] = defaultBuildContext + DefaultValues["services.*.secrets.*"] = defaultSecretMount + DefaultValues["services.*.ports.*"] = portDefaults + DefaultValues["services.*.deploy.resources.reservations.devices.*"] = deviceRequestDefaults + DefaultValues["services.*.gpus.*"] = deviceRequestDefaults + DefaultValues["services.*.volumes.*.bind"] = defaultVolumeBind +} + +// RegisterDefaultValue registers a custom transformer for the given path pattern +func RegisterDefaultValue(path string, transformer Func) { + DefaultValues[tree.Path(path)] = transformer +} + +// SetDefaultValues transforms a compose model to set default values to missing attributes +func SetDefaultValues(yaml map[string]any) (map[string]any, error) { + result, err := setDefaults(yaml, tree.NewPath()) + if err != nil { + return nil, err + } + return result.(map[string]any), nil +} + +func setDefaults(data any, p tree.Path) (any, error) { + for pattern, transformer := range DefaultValues { + if p.Matches(pattern) { + t, err := transformer(data, p, false) + if err != 
nil { + return nil, err + } + return t, nil + } + } + switch v := data.(type) { + case map[string]any: + a, err := setDefaultsMapping(v, p) + if err != nil { + return a, err + } + return v, nil + case []any: + a, err := setDefaultsSequence(v, p) + if err != nil { + return a, err + } + return v, nil + default: + return data, nil + } +} + +func setDefaultsSequence(v []any, p tree.Path) ([]any, error) { + for i, e := range v { + t, err := setDefaults(e, p.Next("[]")) + if err != nil { + return nil, err + } + v[i] = t + } + return v, nil +} + +func setDefaultsMapping(v map[string]any, p tree.Path) (map[string]any, error) { + for k, e := range v { + t, err := setDefaults(e, p.Next(k)) + if err != nil { + return nil, err + } + v[k] = t + } + return v, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go b/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go new file mode 100644 index 00000000..0a72ffa4 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/dependson.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformDependsOn(data any, p tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case map[string]any: + for i, e := range v { + d, ok := e.(map[string]any) + if !ok { + return nil, fmt.Errorf("%s.%s: unsupported value %s", p, i, v) + } + if _, ok := d["condition"]; !ok { + d["condition"] = "service_started" + } + if _, ok := d["required"]; !ok { + d["required"] = true + } + } + return v, nil + case []any: + d := map[string]any{} + for _, k := range v { + d[k.(string)] = map[string]any{ + "condition": "service_started", + "required": true, + } + } + return d, nil + default: + return data, fmt.Errorf("%s: invalid type %T for depend_on", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/device.go b/vendor/github.com/compose-spec/compose-go/v2/transform/device.go new file mode 100644 index 00000000..351d8151 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/device.go @@ -0,0 +1,60 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + "strings" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformDeviceMapping(data any, p tree.Path, ignoreParseError bool) (any, error) { + switch v := data.(type) { + case map[string]any: + return v, nil + case string: + src := "" + dst := "" + permissions := "rwm" + arr := strings.Split(v, ":") + switch len(arr) { + case 3: + permissions = arr[2] + fallthrough + case 2: + dst = arr[1] + fallthrough + case 1: + src = arr[0] + default: + if !ignoreParseError { + return nil, fmt.Errorf("confusing device mapping, please use long syntax: %s", v) + } + } + if dst == "" { + dst = src + } + return map[string]any{ + "source": src, + "target": dst, + "permissions": permissions, + }, nil + default: + return data, fmt.Errorf("%s: invalid type %T for service volume mount", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/devices.go b/vendor/github.com/compose-spec/compose-go/v2/transform/devices.go new file mode 100644 index 00000000..5de0613c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/devices.go @@ -0,0 +1,36 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func deviceRequestDefaults(data any, p tree.Path, _ bool) (any, error) { + v, ok := data.(map[string]any) + if !ok { + return data, fmt.Errorf("%s: invalid type %T for device request", p, v) + } + _, hasCount := v["count"] + _, hasIDs := v["device_ids"] + if !hasCount && !hasIDs { + v["count"] = "all" + } + return v, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go b/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go new file mode 100644 index 00000000..e5100530 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/envfile.go @@ -0,0 +1,55 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformEnvFile(data any, p tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case string: + return []any{ + transformEnvFileValue(v), + }, nil + case []any: + for i, e := range v { + v[i] = transformEnvFileValue(e) + } + return v, nil + default: + return nil, fmt.Errorf("%s: invalid type %T for env_file", p, v) + } +} + +func transformEnvFileValue(data any) any { + switch v := data.(type) { + case string: + return map[string]any{ + "path": v, + "required": true, + } + case map[string]any: + if _, ok := v["required"]; !ok { + v["required"] = true + } + return v + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go b/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go new file mode 100644 index 00000000..e0f9be2d --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/extends.go @@ -0,0 +1,36 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformExtends(data any, p tree.Path, ignoreParseError bool) (any, error) { + switch v := data.(type) { + case map[string]any: + return transformMapping(v, p, ignoreParseError) + case string: + return map[string]any{ + "service": v, + }, nil + default: + return data, fmt.Errorf("%s: invalid type %T for extends", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/external.go b/vendor/github.com/compose-spec/compose-go/v2/transform/external.go new file mode 100644 index 00000000..be718f03 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/external.go @@ -0,0 +1,54 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" + "github.com/sirupsen/logrus" +) + +func transformMaybeExternal(data any, p tree.Path, ignoreParseError bool) (any, error) { + if data == nil { + return nil, nil + } + resource, err := transformMapping(data.(map[string]any), p, ignoreParseError) + if err != nil { + return nil, err + } + + if ext, ok := resource["external"]; ok { + name, named := resource["name"] + if external, ok := ext.(map[string]any); ok { + resource["external"] = true + if extname, extNamed := external["name"]; extNamed { + logrus.Warnf("%s: external.name is deprecated. 
Please set name and external: true", p) + if named && extname != name { + return nil, fmt.Errorf("%s: name and external.name conflict; only use name", p) + } + if !named { + // adopt (deprecated) external.name if set + resource["name"] = extname + return resource, nil + } + } + } + } + + return resource, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/gpus.go b/vendor/github.com/compose-spec/compose-go/v2/transform/gpus.go new file mode 100644 index 00000000..3c91b63b --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/gpus.go @@ -0,0 +1,38 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformGpus(data any, p tree.Path, ignoreParseError bool) (any, error) { + switch v := data.(type) { + case []any: + return transformSequence(v, p, ignoreParseError) + case string: + return []any{ + map[string]any{ + "count": "all", + }, + }, nil + default: + return data, fmt.Errorf("%s: invalid type %T for gpus", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/include.go b/vendor/github.com/compose-spec/compose-go/v2/transform/include.go new file mode 100644 index 00000000..8a80439e --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/include.go @@ -0,0 +1,36 @@ +/* + Copyright 2020 The Compose Specification Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformInclude(data any, p tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case map[string]any: + return v, nil + case string: + return map[string]any{ + "path": v, + }, nil + default: + return data, fmt.Errorf("%s: invalid type %T for external", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go b/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go new file mode 100644 index 00000000..007aa9ed --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/mapping.go @@ -0,0 +1,46 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + "strings" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformKeyValue(data any, p tree.Path, ignoreParseError bool) (any, error) { + switch v := data.(type) { + case map[string]any: + return v, nil + case []any: + mapping := map[string]any{} + for _, e := range v { + before, after, found := strings.Cut(e.(string), "=") + if !found { + if ignoreParseError { + return data, nil + } + return nil, fmt.Errorf("%s: invalid value %s, expected key=value", p, e) + } + mapping[before] = after + } + return mapping, nil + default: + return nil, fmt.Errorf("%s: invalid type %T", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go b/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go new file mode 100644 index 00000000..68e26f3d --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/ports.go @@ -0,0 +1,104 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" + "github.com/compose-spec/compose-go/v2/types" + "github.com/go-viper/mapstructure/v2" +) + +func transformPorts(data any, p tree.Path, ignoreParseError bool) (any, error) { + switch entries := data.(type) { + case []any: + // We process the list instead of individual items here. + // The reason is that one entry might be mapped to multiple ServicePortConfig. 
+ // Therefore we take an input of a list and return an output of a list. + var ports []any + for _, entry := range entries { + switch value := entry.(type) { + case int: + parsed, err := types.ParsePortConfig(fmt.Sprint(value)) + if err != nil { + return data, err + } + for _, v := range parsed { + m, err := encode(v) + if err != nil { + return nil, err + } + ports = append(ports, m) + } + case string: + parsed, err := types.ParsePortConfig(value) + if err != nil { + if ignoreParseError { + return data, nil + } + return nil, err + } + if err != nil { + return nil, err + } + for _, v := range parsed { + m, err := encode(v) + if err != nil { + return nil, err + } + ports = append(ports, m) + } + case map[string]any: + ports = append(ports, value) + default: + return data, fmt.Errorf("%s: invalid type %T for port", p, value) + } + } + return ports, nil + default: + return data, fmt.Errorf("%s: invalid type %T for port", p, entries) + } +} + +func encode(v any) (map[string]any, error) { + m := map[string]any{} + decoder, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{ + Result: &m, + TagName: "yaml", + }) + if err != nil { + return nil, err + } + err = decoder.Decode(v) + return m, err +} + +func portDefaults(data any, _ tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case map[string]any: + if _, ok := v["protocol"]; !ok { + v["protocol"] = "tcp" + } + if _, ok := v["mode"]; !ok { + v["mode"] = "ingress" + } + return v, nil + default: + return data, nil + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go b/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go new file mode 100644 index 00000000..c2db1352 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/secrets.go @@ -0,0 +1,49 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformFileMount(data any, p tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case map[string]any: + return data, nil + case string: + return map[string]any{ + "source": v, + }, nil + default: + return nil, fmt.Errorf("%s: unsupported type %T", p, data) + } +} + +func defaultSecretMount(data any, p tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case map[string]any: + source := v["source"] + if _, ok := v["target"]; !ok { + v["target"] = fmt.Sprintf("/run/secrets/%s", source) + } + return v, nil + default: + return nil, fmt.Errorf("%s: unsupported type %T", p, data) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/services.go b/vendor/github.com/compose-spec/compose-go/v2/transform/services.go new file mode 100644 index 00000000..d9df42c8 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/services.go @@ -0,0 +1,41 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package transform + +import ( + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformService(data any, p tree.Path, ignoreParseError bool) (any, error) { + switch value := data.(type) { + case map[string]any: + return transformMapping(value, p, ignoreParseError) + default: + return value, nil + } +} + +func transformStringSliceToMap(data any, _ tree.Path, _ bool) (any, error) { + if slice, ok := data.([]any); ok { + mapping := make(map[string]any, len(slice)) + for _, net := range slice { + mapping[net.(string)] = nil + } + return mapping, nil + } + return data, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go b/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go new file mode 100644 index 00000000..2663461e --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/ssh.go @@ -0,0 +1,51 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + "strings" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformSSH(data any, p tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case map[string]any: + return v, nil + case []any: + result := make(map[string]any, len(v)) + for _, e := range v { + s, ok := e.(string) + if !ok { + return nil, fmt.Errorf("invalid ssh key type %T", e) + } + id, path, ok := strings.Cut(s, "=") + if !ok { + if id != "default" { + return nil, fmt.Errorf("invalid ssh key %q", s) + } + result[id] = nil + continue + } + result[id] = path + } + return result, nil + default: + return data, fmt.Errorf("%s: invalid type %T for ssh", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go b/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go new file mode 100644 index 00000000..57cce4fb --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/ulimits.go @@ -0,0 +1,34 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformUlimits(data any, p tree.Path, _ bool) (any, error) { + switch v := data.(type) { + case map[string]any: + return v, nil + case int: + return v, nil + default: + return data, fmt.Errorf("%s: invalid type %T for external", p, v) + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go b/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go new file mode 100644 index 00000000..6aa59cf1 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/transform/volume.go @@ -0,0 +1,63 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package transform + +import ( + "fmt" + "path" + + "github.com/compose-spec/compose-go/v2/format" + "github.com/compose-spec/compose-go/v2/tree" +) + +func transformVolumeMount(data any, p tree.Path, ignoreParseError bool) (any, error) { + switch v := data.(type) { + case map[string]any: + return v, nil + case string: + volume, err := format.ParseVolume(v) // TODO(ndeloof) ParseVolume should not rely on types and return map[string] + if err != nil { + if ignoreParseError { + return v, nil + } + return nil, err + } + volume.Target = cleanTarget(volume.Target) + + return encode(volume) + default: + return data, fmt.Errorf("%s: invalid type %T for service volume mount", p, v) + } +} + +func cleanTarget(target string) string { + if target == "" { + return "" + } + return path.Clean(target) +} + +func defaultVolumeBind(data any, p tree.Path, _ bool) (any, error) { + bind, ok := data.(map[string]any) + if !ok { + return data, fmt.Errorf("%s: invalid type %T for service volume bind", p, data) + } + if _, ok := bind["create_host_path"]; !ok { + bind["create_host_path"] = true + } + return bind, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/tree/path.go b/vendor/github.com/compose-spec/compose-go/v2/tree/path.go new file mode 100644 index 00000000..f8a8d9a6 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/tree/path.go @@ -0,0 +1,87 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package tree + +import ( + "strings" +) + +const pathSeparator = "." + +// PathMatchAll is a token used as part of a Path to match any key at that level +// in the nested structure +const PathMatchAll = "*" + +// PathMatchList is a token used as part of a Path to match items in a list +const PathMatchList = "[]" + +// Path is a dotted path of keys to a value in a nested mapping structure. A * +// section in a path will match any key in the mapping structure. +type Path string + +// NewPath returns a new Path +func NewPath(items ...string) Path { + return Path(strings.Join(items, pathSeparator)) +} + +// Next returns a new path by append part to the current path +func (p Path) Next(part string) Path { + if p == "" { + return Path(part) + } + part = strings.ReplaceAll(part, pathSeparator, "👻") + return Path(string(p) + pathSeparator + part) +} + +func (p Path) Parts() []string { + return strings.Split(string(p), pathSeparator) +} + +func (p Path) Matches(pattern Path) bool { + patternParts := pattern.Parts() + parts := p.Parts() + + if len(patternParts) != len(parts) { + return false + } + for index, part := range parts { + switch patternParts[index] { + case PathMatchAll, part: + continue + default: + return false + } + } + return true +} + +func (p Path) Last() string { + parts := p.Parts() + return parts[len(parts)-1] +} + +func (p Path) Parent() Path { + index := strings.LastIndex(string(p), pathSeparator) + if index > 0 { + return p[0:index] + } + return "" +} + +func (p Path) String() string { + return strings.ReplaceAll(string(p), "👻", pathSeparator) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/build.go b/vendor/github.com/compose-spec/compose-go/v2/types/build.go new file mode 100644 index 00000000..98931400 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/build.go @@ -0,0 +1,48 @@ +/* + Copyright 2020 The Compose Specification Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +// BuildConfig is a type for build +type BuildConfig struct { + Context string `yaml:"context,omitempty" json:"context,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` + DockerfileInline string `yaml:"dockerfile_inline,omitempty" json:"dockerfile_inline,omitempty"` + Entitlements []string `yaml:"entitlements,omitempty" json:"entitlements,omitempty"` + Args MappingWithEquals `yaml:"args,omitempty" json:"args,omitempty"` + Provenance string `yaml:"provenance,omitempty" json:"provenance,omitempty"` + SBOM string `yaml:"sbom,omitempty" json:"sbom,omitempty"` + SSH SSHConfig `yaml:"ssh,omitempty" json:"ssh,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + CacheFrom StringList `yaml:"cache_from,omitempty" json:"cache_from,omitempty"` + CacheTo StringList `yaml:"cache_to,omitempty" json:"cache_to,omitempty"` + NoCache bool `yaml:"no_cache,omitempty" json:"no_cache,omitempty"` + NoCacheFilter StringList `yaml:"no_cache_filter,omitempty" json:"no_cache_filter,omitempty"` + AdditionalContexts Mapping `yaml:"additional_contexts,omitempty" json:"additional_contexts,omitempty"` + Pull bool `yaml:"pull,omitempty" json:"pull,omitempty"` + ExtraHosts HostsList `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` + Isolation string `yaml:"isolation,omitempty" json:"isolation,omitempty"` + Network string `yaml:"network,omitempty" 
json:"network,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + Secrets []ServiceSecretConfig `yaml:"secrets,omitempty" json:"secrets,omitempty"` + ShmSize UnitBytes `yaml:"shm_size,omitempty" json:"shm_size,omitempty"` + Tags StringList `yaml:"tags,omitempty" json:"tags,omitempty"` + Ulimits map[string]*UlimitsConfig `yaml:"ulimits,omitempty" json:"ulimits,omitempty"` + Platforms StringList `yaml:"platforms,omitempty" json:"platforms,omitempty"` + Privileged bool `yaml:"privileged,omitempty" json:"privileged,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go b/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go new file mode 100644 index 00000000..1b2cd419 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/bytes.go @@ -0,0 +1,48 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" + + "github.com/docker/go-units" +) + +// UnitBytes is the bytes type +type UnitBytes int64 + +// MarshalYAML makes UnitBytes implement yaml.Marshaller +func (u UnitBytes) MarshalYAML() (interface{}, error) { + return fmt.Sprintf("%d", u), nil +} + +// MarshalJSON makes UnitBytes implement json.Marshaler +func (u UnitBytes) MarshalJSON() ([]byte, error) { + return []byte(fmt.Sprintf(`"%d"`, u)), nil +} + +func (u *UnitBytes) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case int: + *u = UnitBytes(v) + case string: + b, err := units.RAMInBytes(fmt.Sprint(value)) + *u = UnitBytes(b) + return err + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/command.go b/vendor/github.com/compose-spec/compose-go/v2/types/command.go new file mode 100644 index 00000000..559dc305 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/command.go @@ -0,0 +1,86 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import "github.com/mattn/go-shellwords" + +// ShellCommand is a string or list of string args. +// +// When marshaled to YAML, nil command fields will be omitted if `omitempty` +// is specified as a struct tag. Explicitly empty commands (i.e. `[]` or +// empty string will serialize to an empty array (`[]`). +// +// When marshaled to JSON, the `omitempty` struct must NOT be specified. 
+// If the command field is nil, it will be serialized as `null`. +// Explicitly empty commands (i.e. `[]` or empty string) will serialize to +// an empty array (`[]`). +// +// The distinction between nil and explicitly empty is important to distinguish +// between an unset value and a provided, but empty, value, which should be +// preserved so that it can override any base value (e.g. container entrypoint). +// +// The different semantics between YAML and JSON are due to limitations with +// JSON marshaling + `omitempty` in the Go stdlib, while go.yaml.in/yaml/v4 gives +// us more flexibility via the yaml.IsZeroer interface. +// +// In the future, it might make sense to make fields of this type be +// `*ShellCommand` to avoid this situation, but that would constitute a +// breaking change. +type ShellCommand []string + +// IsZero returns true if the slice is nil. +// +// Empty (but non-nil) slices are NOT considered zero values. +func (s ShellCommand) IsZero() bool { + // we do NOT want len(s) == 0, ONLY explicitly nil + return s == nil +} + +// MarshalYAML returns nil (which will be serialized as `null`) for nil slices +// and delegates to the standard marshaller behavior otherwise. +// +// NOTE: Typically the nil case here is not hit because IsZero has already +// short-circuited marshalling, but this ensures that the type serializes +// accurately if the `omitempty` struct tag is omitted/forgotten. +// +// A similar MarshalJSON() implementation is not needed because the Go stdlib +// already serializes nil slices to `null`, whereas go.yaml.in/yaml/v4 by default +// serializes nil slices to `[]`. 
+func (s ShellCommand) MarshalYAML() (interface{}, error) { + if s == nil { + return nil, nil + } + return []string(s), nil +} + +func (s *ShellCommand) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + cmd, err := shellwords.Parse(v) + if err != nil { + return err + } + *s = cmd + case []interface{}: + cmd := make([]string, len(v)) + for i, s := range v { + cmd[i] = s.(string) + } + *s = cmd + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/config.go b/vendor/github.com/compose-spec/compose-go/v2/types/config.go new file mode 100644 index 00000000..9a0fdaf2 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/config.go @@ -0,0 +1,145 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "encoding/json" + "runtime" + "strings" + + "github.com/go-viper/mapstructure/v2" +) + +// isCaseInsensitiveEnvVars is true on platforms where environment variable names are treated case-insensitively. 
+var isCaseInsensitiveEnvVars = (runtime.GOOS == "windows") + +// ConfigDetails are the details about a group of ConfigFiles +type ConfigDetails struct { + Version string + WorkingDir string + ConfigFiles []ConfigFile + Environment Mapping +} + +// LookupEnv provides a lookup function for environment variables +func (cd *ConfigDetails) LookupEnv(key string) (string, bool) { + v, ok := cd.Environment[key] + if !isCaseInsensitiveEnvVars || ok { + return v, ok + } + // variable names must be treated case-insensitively on some platforms (that is, Windows). + // Resolves in this way: + // * Return the value if its name matches with the passed name case-sensitively. + // * Otherwise, return the value if its lower-cased name matches lower-cased passed name. + // * The value is indefinite if multiple variables match. + lowerKey := strings.ToLower(key) + for k, v := range cd.Environment { + if strings.ToLower(k) == lowerKey { + return v, true + } + } + return "", false +} + +// ConfigFile is a filename and the contents of the file as a Dict +type ConfigFile struct { + // Filename is the name of the yaml configuration file + Filename string + // Content is the raw yaml content. Will be loaded from Filename if not set + Content []byte + // Config if the yaml tree for this config file. 
Will be parsed from Content if not set + Config map[string]interface{} +} + +func (cf ConfigFile) IsStdin() bool { + return cf.Filename == "-" +} + +func ToConfigFiles(path []string) (f []ConfigFile) { + for _, p := range path { + f = append(f, ConfigFile{Filename: p}) + } + return +} + +// Config is a full compose file configuration and model +type Config struct { + Filename string `yaml:"-" json:"-"` + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Services Services `yaml:"services" json:"services"` + Networks Networks `yaml:"networks,omitempty" json:"networks,omitempty"` + Volumes Volumes `yaml:"volumes,omitempty" json:"volumes,omitempty"` + Secrets Secrets `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Configs Configs `yaml:"configs,omitempty" json:"configs,omitempty"` + Extensions Extensions `yaml:",inline" json:"-"` + Include []IncludeConfig `yaml:"include,omitempty" json:"include,omitempty"` +} + +// Volumes is a map of VolumeConfig +type Volumes map[string]VolumeConfig + +// Networks is a map of NetworkConfig +type Networks map[string]NetworkConfig + +// Secrets is a map of SecretConfig +type Secrets map[string]SecretConfig + +// Configs is a map of ConfigObjConfig +type Configs map[string]ConfigObjConfig + +type Models map[string]ModelConfig + +// Extensions is a map of custom extension +type Extensions map[string]any + +func (e Extensions) DeepCopy(t Extensions) { + for k, v := range e { + t[k] = v + } +} + +// MarshalJSON makes Config implement json.Marshaler +func (c Config) MarshalJSON() ([]byte, error) { + m := map[string]interface{}{ + "services": c.Services, + } + + if len(c.Networks) > 0 { + m["networks"] = c.Networks + } + if len(c.Volumes) > 0 { + m["volumes"] = c.Volumes + } + if len(c.Secrets) > 0 { + m["secrets"] = c.Secrets + } + if len(c.Configs) > 0 { + m["configs"] = c.Configs + } + for k, v := range c.Extensions { + m[k] = v + } + return json.Marshal(m) +} + +func (e Extensions) Get(name string, target interface{}) 
(bool, error) { + if v, ok := e[name]; ok { + err := mapstructure.Decode(v, target) + return true, err + } + return false, nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/cpus.go b/vendor/github.com/compose-spec/compose-go/v2/types/cpus.go new file mode 100644 index 00000000..f32c6e62 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/cpus.go @@ -0,0 +1,48 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "fmt" + "strconv" +) + +type NanoCPUs float32 + +func (n *NanoCPUs) DecodeMapstructure(a any) error { + switch v := a.(type) { + case string: + f, err := strconv.ParseFloat(v, 64) + if err != nil { + return err + } + *n = NanoCPUs(f) + case int: + *n = NanoCPUs(v) + case float32: + *n = NanoCPUs(v) + case float64: + *n = NanoCPUs(v) + default: + return fmt.Errorf("unexpected value type %T for cpus", v) + } + return nil +} + +func (n *NanoCPUs) Value() float32 { + return float32(*n) +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go b/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go new file mode 100644 index 00000000..e284fa9f --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/derived.gen.go @@ -0,0 +1,2423 @@ +// Code generated by goderive DO NOT EDIT. + +package types + +// deriveDeepCopyProject recursively copies the contents of src into dst. 
+func deriveDeepCopyProject(dst, src *Project) { + dst.Name = src.Name + dst.WorkingDir = src.WorkingDir + if src.Services != nil { + dst.Services = make(map[string]ServiceConfig, len(src.Services)) + deriveDeepCopy(dst.Services, src.Services) + } else { + dst.Services = nil + } + if src.Networks != nil { + dst.Networks = make(map[string]NetworkConfig, len(src.Networks)) + deriveDeepCopy_(dst.Networks, src.Networks) + } else { + dst.Networks = nil + } + if src.Volumes != nil { + dst.Volumes = make(map[string]VolumeConfig, len(src.Volumes)) + deriveDeepCopy_1(dst.Volumes, src.Volumes) + } else { + dst.Volumes = nil + } + if src.Secrets != nil { + dst.Secrets = make(map[string]SecretConfig, len(src.Secrets)) + deriveDeepCopy_2(dst.Secrets, src.Secrets) + } else { + dst.Secrets = nil + } + if src.Configs != nil { + dst.Configs = make(map[string]ConfigObjConfig, len(src.Configs)) + deriveDeepCopy_3(dst.Configs, src.Configs) + } else { + dst.Configs = nil + } + if src.Models != nil { + dst.Models = make(map[string]ModelConfig, len(src.Models)) + deriveDeepCopy_4(dst.Models, src.Models) + } else { + dst.Models = nil + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } + if src.ComposeFiles == nil { + dst.ComposeFiles = nil + } else { + if dst.ComposeFiles != nil { + if len(src.ComposeFiles) > len(dst.ComposeFiles) { + if cap(dst.ComposeFiles) >= len(src.ComposeFiles) { + dst.ComposeFiles = (dst.ComposeFiles)[:len(src.ComposeFiles)] + } else { + dst.ComposeFiles = make([]string, len(src.ComposeFiles)) + } + } else if len(src.ComposeFiles) < len(dst.ComposeFiles) { + dst.ComposeFiles = (dst.ComposeFiles)[:len(src.ComposeFiles)] + } + } else { + dst.ComposeFiles = make([]string, len(src.ComposeFiles)) + } + copy(dst.ComposeFiles, src.ComposeFiles) + } + if src.Environment != nil { + dst.Environment = make(map[string]string, len(src.Environment)) + 
deriveDeepCopy_5(dst.Environment, src.Environment) + } else { + dst.Environment = nil + } + if src.DisabledServices != nil { + dst.DisabledServices = make(map[string]ServiceConfig, len(src.DisabledServices)) + deriveDeepCopy(dst.DisabledServices, src.DisabledServices) + } else { + dst.DisabledServices = nil + } + if src.Profiles == nil { + dst.Profiles = nil + } else { + if dst.Profiles != nil { + if len(src.Profiles) > len(dst.Profiles) { + if cap(dst.Profiles) >= len(src.Profiles) { + dst.Profiles = (dst.Profiles)[:len(src.Profiles)] + } else { + dst.Profiles = make([]string, len(src.Profiles)) + } + } else if len(src.Profiles) < len(dst.Profiles) { + dst.Profiles = (dst.Profiles)[:len(src.Profiles)] + } + } else { + dst.Profiles = make([]string, len(src.Profiles)) + } + copy(dst.Profiles, src.Profiles) + } +} + +// deriveDeepCopyService recursively copies the contents of src into dst. +func deriveDeepCopyService(dst, src *ServiceConfig) { + dst.Name = src.Name + if src.Profiles == nil { + dst.Profiles = nil + } else { + if dst.Profiles != nil { + if len(src.Profiles) > len(dst.Profiles) { + if cap(dst.Profiles) >= len(src.Profiles) { + dst.Profiles = (dst.Profiles)[:len(src.Profiles)] + } else { + dst.Profiles = make([]string, len(src.Profiles)) + } + } else if len(src.Profiles) < len(dst.Profiles) { + dst.Profiles = (dst.Profiles)[:len(src.Profiles)] + } + } else { + dst.Profiles = make([]string, len(src.Profiles)) + } + copy(dst.Profiles, src.Profiles) + } + if src.Annotations != nil { + dst.Annotations = make(map[string]string, len(src.Annotations)) + deriveDeepCopy_5(dst.Annotations, src.Annotations) + } else { + dst.Annotations = nil + } + if src.Attach == nil { + dst.Attach = nil + } else { + dst.Attach = new(bool) + *dst.Attach = *src.Attach + } + if src.Build == nil { + dst.Build = nil + } else { + dst.Build = new(BuildConfig) + deriveDeepCopy_6(dst.Build, src.Build) + } + if src.Develop == nil { + dst.Develop = nil + } else { + dst.Develop = 
new(DevelopConfig) + deriveDeepCopy_7(dst.Develop, src.Develop) + } + if src.BlkioConfig == nil { + dst.BlkioConfig = nil + } else { + dst.BlkioConfig = new(BlkioConfig) + deriveDeepCopy_8(dst.BlkioConfig, src.BlkioConfig) + } + if src.CapAdd == nil { + dst.CapAdd = nil + } else { + if dst.CapAdd != nil { + if len(src.CapAdd) > len(dst.CapAdd) { + if cap(dst.CapAdd) >= len(src.CapAdd) { + dst.CapAdd = (dst.CapAdd)[:len(src.CapAdd)] + } else { + dst.CapAdd = make([]string, len(src.CapAdd)) + } + } else if len(src.CapAdd) < len(dst.CapAdd) { + dst.CapAdd = (dst.CapAdd)[:len(src.CapAdd)] + } + } else { + dst.CapAdd = make([]string, len(src.CapAdd)) + } + copy(dst.CapAdd, src.CapAdd) + } + if src.CapDrop == nil { + dst.CapDrop = nil + } else { + if dst.CapDrop != nil { + if len(src.CapDrop) > len(dst.CapDrop) { + if cap(dst.CapDrop) >= len(src.CapDrop) { + dst.CapDrop = (dst.CapDrop)[:len(src.CapDrop)] + } else { + dst.CapDrop = make([]string, len(src.CapDrop)) + } + } else if len(src.CapDrop) < len(dst.CapDrop) { + dst.CapDrop = (dst.CapDrop)[:len(src.CapDrop)] + } + } else { + dst.CapDrop = make([]string, len(src.CapDrop)) + } + copy(dst.CapDrop, src.CapDrop) + } + dst.CgroupParent = src.CgroupParent + dst.Cgroup = src.Cgroup + dst.CPUCount = src.CPUCount + dst.CPUPercent = src.CPUPercent + dst.CPUPeriod = src.CPUPeriod + dst.CPUQuota = src.CPUQuota + dst.CPURTPeriod = src.CPURTPeriod + dst.CPURTRuntime = src.CPURTRuntime + dst.CPUS = src.CPUS + dst.CPUSet = src.CPUSet + dst.CPUShares = src.CPUShares + if src.Command == nil { + dst.Command = nil + } else { + if dst.Command != nil { + if len(src.Command) > len(dst.Command) { + if cap(dst.Command) >= len(src.Command) { + dst.Command = (dst.Command)[:len(src.Command)] + } else { + dst.Command = make([]string, len(src.Command)) + } + } else if len(src.Command) < len(dst.Command) { + dst.Command = (dst.Command)[:len(src.Command)] + } + } else { + dst.Command = make([]string, len(src.Command)) + } + copy(dst.Command, 
src.Command) + } + if src.Configs == nil { + dst.Configs = nil + } else { + if dst.Configs != nil { + if len(src.Configs) > len(dst.Configs) { + if cap(dst.Configs) >= len(src.Configs) { + dst.Configs = (dst.Configs)[:len(src.Configs)] + } else { + dst.Configs = make([]ServiceConfigObjConfig, len(src.Configs)) + } + } else if len(src.Configs) < len(dst.Configs) { + dst.Configs = (dst.Configs)[:len(src.Configs)] + } + } else { + dst.Configs = make([]ServiceConfigObjConfig, len(src.Configs)) + } + deriveDeepCopy_9(dst.Configs, src.Configs) + } + dst.ContainerName = src.ContainerName + if src.CredentialSpec == nil { + dst.CredentialSpec = nil + } else { + dst.CredentialSpec = new(CredentialSpecConfig) + deriveDeepCopy_10(dst.CredentialSpec, src.CredentialSpec) + } + if src.DependsOn != nil { + dst.DependsOn = make(map[string]ServiceDependency, len(src.DependsOn)) + deriveDeepCopy_11(dst.DependsOn, src.DependsOn) + } else { + dst.DependsOn = nil + } + if src.Deploy == nil { + dst.Deploy = nil + } else { + dst.Deploy = new(DeployConfig) + deriveDeepCopy_12(dst.Deploy, src.Deploy) + } + if src.DeviceCgroupRules == nil { + dst.DeviceCgroupRules = nil + } else { + if dst.DeviceCgroupRules != nil { + if len(src.DeviceCgroupRules) > len(dst.DeviceCgroupRules) { + if cap(dst.DeviceCgroupRules) >= len(src.DeviceCgroupRules) { + dst.DeviceCgroupRules = (dst.DeviceCgroupRules)[:len(src.DeviceCgroupRules)] + } else { + dst.DeviceCgroupRules = make([]string, len(src.DeviceCgroupRules)) + } + } else if len(src.DeviceCgroupRules) < len(dst.DeviceCgroupRules) { + dst.DeviceCgroupRules = (dst.DeviceCgroupRules)[:len(src.DeviceCgroupRules)] + } + } else { + dst.DeviceCgroupRules = make([]string, len(src.DeviceCgroupRules)) + } + copy(dst.DeviceCgroupRules, src.DeviceCgroupRules) + } + if src.Devices == nil { + dst.Devices = nil + } else { + if dst.Devices != nil { + if len(src.Devices) > len(dst.Devices) { + if cap(dst.Devices) >= len(src.Devices) { + dst.Devices = 
(dst.Devices)[:len(src.Devices)] + } else { + dst.Devices = make([]DeviceMapping, len(src.Devices)) + } + } else if len(src.Devices) < len(dst.Devices) { + dst.Devices = (dst.Devices)[:len(src.Devices)] + } + } else { + dst.Devices = make([]DeviceMapping, len(src.Devices)) + } + deriveDeepCopy_13(dst.Devices, src.Devices) + } + if src.DNS == nil { + dst.DNS = nil + } else { + if dst.DNS != nil { + if len(src.DNS) > len(dst.DNS) { + if cap(dst.DNS) >= len(src.DNS) { + dst.DNS = (dst.DNS)[:len(src.DNS)] + } else { + dst.DNS = make([]string, len(src.DNS)) + } + } else if len(src.DNS) < len(dst.DNS) { + dst.DNS = (dst.DNS)[:len(src.DNS)] + } + } else { + dst.DNS = make([]string, len(src.DNS)) + } + copy(dst.DNS, src.DNS) + } + if src.DNSOpts == nil { + dst.DNSOpts = nil + } else { + if dst.DNSOpts != nil { + if len(src.DNSOpts) > len(dst.DNSOpts) { + if cap(dst.DNSOpts) >= len(src.DNSOpts) { + dst.DNSOpts = (dst.DNSOpts)[:len(src.DNSOpts)] + } else { + dst.DNSOpts = make([]string, len(src.DNSOpts)) + } + } else if len(src.DNSOpts) < len(dst.DNSOpts) { + dst.DNSOpts = (dst.DNSOpts)[:len(src.DNSOpts)] + } + } else { + dst.DNSOpts = make([]string, len(src.DNSOpts)) + } + copy(dst.DNSOpts, src.DNSOpts) + } + if src.DNSSearch == nil { + dst.DNSSearch = nil + } else { + if dst.DNSSearch != nil { + if len(src.DNSSearch) > len(dst.DNSSearch) { + if cap(dst.DNSSearch) >= len(src.DNSSearch) { + dst.DNSSearch = (dst.DNSSearch)[:len(src.DNSSearch)] + } else { + dst.DNSSearch = make([]string, len(src.DNSSearch)) + } + } else if len(src.DNSSearch) < len(dst.DNSSearch) { + dst.DNSSearch = (dst.DNSSearch)[:len(src.DNSSearch)] + } + } else { + dst.DNSSearch = make([]string, len(src.DNSSearch)) + } + copy(dst.DNSSearch, src.DNSSearch) + } + dst.Dockerfile = src.Dockerfile + dst.DomainName = src.DomainName + if src.Entrypoint == nil { + dst.Entrypoint = nil + } else { + if dst.Entrypoint != nil { + if len(src.Entrypoint) > len(dst.Entrypoint) { + if cap(dst.Entrypoint) >= 
len(src.Entrypoint) { + dst.Entrypoint = (dst.Entrypoint)[:len(src.Entrypoint)] + } else { + dst.Entrypoint = make([]string, len(src.Entrypoint)) + } + } else if len(src.Entrypoint) < len(dst.Entrypoint) { + dst.Entrypoint = (dst.Entrypoint)[:len(src.Entrypoint)] + } + } else { + dst.Entrypoint = make([]string, len(src.Entrypoint)) + } + copy(dst.Entrypoint, src.Entrypoint) + } + if src.Provider == nil { + dst.Provider = nil + } else { + dst.Provider = new(ServiceProviderConfig) + deriveDeepCopy_14(dst.Provider, src.Provider) + } + if src.Environment != nil { + dst.Environment = make(map[string]*string, len(src.Environment)) + deriveDeepCopy_15(dst.Environment, src.Environment) + } else { + dst.Environment = nil + } + if src.EnvFiles == nil { + dst.EnvFiles = nil + } else { + if dst.EnvFiles != nil { + if len(src.EnvFiles) > len(dst.EnvFiles) { + if cap(dst.EnvFiles) >= len(src.EnvFiles) { + dst.EnvFiles = (dst.EnvFiles)[:len(src.EnvFiles)] + } else { + dst.EnvFiles = make([]EnvFile, len(src.EnvFiles)) + } + } else if len(src.EnvFiles) < len(dst.EnvFiles) { + dst.EnvFiles = (dst.EnvFiles)[:len(src.EnvFiles)] + } + } else { + dst.EnvFiles = make([]EnvFile, len(src.EnvFiles)) + } + copy(dst.EnvFiles, src.EnvFiles) + } + if src.Expose == nil { + dst.Expose = nil + } else { + if dst.Expose != nil { + if len(src.Expose) > len(dst.Expose) { + if cap(dst.Expose) >= len(src.Expose) { + dst.Expose = (dst.Expose)[:len(src.Expose)] + } else { + dst.Expose = make([]string, len(src.Expose)) + } + } else if len(src.Expose) < len(dst.Expose) { + dst.Expose = (dst.Expose)[:len(src.Expose)] + } + } else { + dst.Expose = make([]string, len(src.Expose)) + } + copy(dst.Expose, src.Expose) + } + if src.Extends == nil { + dst.Extends = nil + } else { + dst.Extends = new(ExtendsConfig) + *dst.Extends = *src.Extends + } + if src.ExternalLinks == nil { + dst.ExternalLinks = nil + } else { + if dst.ExternalLinks != nil { + if len(src.ExternalLinks) > len(dst.ExternalLinks) { + if 
cap(dst.ExternalLinks) >= len(src.ExternalLinks) { + dst.ExternalLinks = (dst.ExternalLinks)[:len(src.ExternalLinks)] + } else { + dst.ExternalLinks = make([]string, len(src.ExternalLinks)) + } + } else if len(src.ExternalLinks) < len(dst.ExternalLinks) { + dst.ExternalLinks = (dst.ExternalLinks)[:len(src.ExternalLinks)] + } + } else { + dst.ExternalLinks = make([]string, len(src.ExternalLinks)) + } + copy(dst.ExternalLinks, src.ExternalLinks) + } + if src.ExtraHosts != nil { + dst.ExtraHosts = make(map[string][]string, len(src.ExtraHosts)) + deriveDeepCopy_16(dst.ExtraHosts, src.ExtraHosts) + } else { + dst.ExtraHosts = nil + } + if src.GroupAdd == nil { + dst.GroupAdd = nil + } else { + if dst.GroupAdd != nil { + if len(src.GroupAdd) > len(dst.GroupAdd) { + if cap(dst.GroupAdd) >= len(src.GroupAdd) { + dst.GroupAdd = (dst.GroupAdd)[:len(src.GroupAdd)] + } else { + dst.GroupAdd = make([]string, len(src.GroupAdd)) + } + } else if len(src.GroupAdd) < len(dst.GroupAdd) { + dst.GroupAdd = (dst.GroupAdd)[:len(src.GroupAdd)] + } + } else { + dst.GroupAdd = make([]string, len(src.GroupAdd)) + } + copy(dst.GroupAdd, src.GroupAdd) + } + if src.Gpus == nil { + dst.Gpus = nil + } else { + if dst.Gpus != nil { + if len(src.Gpus) > len(dst.Gpus) { + if cap(dst.Gpus) >= len(src.Gpus) { + dst.Gpus = (dst.Gpus)[:len(src.Gpus)] + } else { + dst.Gpus = make([]DeviceRequest, len(src.Gpus)) + } + } else if len(src.Gpus) < len(dst.Gpus) { + dst.Gpus = (dst.Gpus)[:len(src.Gpus)] + } + } else { + dst.Gpus = make([]DeviceRequest, len(src.Gpus)) + } + deriveDeepCopy_17(dst.Gpus, src.Gpus) + } + dst.Hostname = src.Hostname + if src.HealthCheck == nil { + dst.HealthCheck = nil + } else { + dst.HealthCheck = new(HealthCheckConfig) + deriveDeepCopy_18(dst.HealthCheck, src.HealthCheck) + } + dst.Image = src.Image + if src.Init == nil { + dst.Init = nil + } else { + dst.Init = new(bool) + *dst.Init = *src.Init + } + dst.Ipc = src.Ipc + dst.Isolation = src.Isolation + if src.Labels != nil { + 
dst.Labels = make(map[string]string, len(src.Labels)) + deriveDeepCopy_5(dst.Labels, src.Labels) + } else { + dst.Labels = nil + } + if src.LabelFiles == nil { + dst.LabelFiles = nil + } else { + if dst.LabelFiles != nil { + if len(src.LabelFiles) > len(dst.LabelFiles) { + if cap(dst.LabelFiles) >= len(src.LabelFiles) { + dst.LabelFiles = (dst.LabelFiles)[:len(src.LabelFiles)] + } else { + dst.LabelFiles = make([]string, len(src.LabelFiles)) + } + } else if len(src.LabelFiles) < len(dst.LabelFiles) { + dst.LabelFiles = (dst.LabelFiles)[:len(src.LabelFiles)] + } + } else { + dst.LabelFiles = make([]string, len(src.LabelFiles)) + } + copy(dst.LabelFiles, src.LabelFiles) + } + if src.CustomLabels != nil { + dst.CustomLabels = make(map[string]string, len(src.CustomLabels)) + deriveDeepCopy_5(dst.CustomLabels, src.CustomLabels) + } else { + dst.CustomLabels = nil + } + if src.Links == nil { + dst.Links = nil + } else { + if dst.Links != nil { + if len(src.Links) > len(dst.Links) { + if cap(dst.Links) >= len(src.Links) { + dst.Links = (dst.Links)[:len(src.Links)] + } else { + dst.Links = make([]string, len(src.Links)) + } + } else if len(src.Links) < len(dst.Links) { + dst.Links = (dst.Links)[:len(src.Links)] + } + } else { + dst.Links = make([]string, len(src.Links)) + } + copy(dst.Links, src.Links) + } + if src.Logging == nil { + dst.Logging = nil + } else { + dst.Logging = new(LoggingConfig) + deriveDeepCopy_19(dst.Logging, src.Logging) + } + dst.LogDriver = src.LogDriver + if src.LogOpt != nil { + dst.LogOpt = make(map[string]string, len(src.LogOpt)) + deriveDeepCopy_5(dst.LogOpt, src.LogOpt) + } else { + dst.LogOpt = nil + } + dst.MemLimit = src.MemLimit + dst.MemReservation = src.MemReservation + dst.MemSwapLimit = src.MemSwapLimit + dst.MemSwappiness = src.MemSwappiness + dst.MacAddress = src.MacAddress + if src.Models != nil { + dst.Models = make(map[string]*ServiceModelConfig, len(src.Models)) + deriveDeepCopy_20(dst.Models, src.Models) + } else { + dst.Models = 
nil + } + dst.Net = src.Net + dst.NetworkMode = src.NetworkMode + if src.Networks != nil { + dst.Networks = make(map[string]*ServiceNetworkConfig, len(src.Networks)) + deriveDeepCopy_21(dst.Networks, src.Networks) + } else { + dst.Networks = nil + } + dst.OomKillDisable = src.OomKillDisable + dst.OomScoreAdj = src.OomScoreAdj + dst.Pid = src.Pid + dst.PidsLimit = src.PidsLimit + dst.Platform = src.Platform + if src.Ports == nil { + dst.Ports = nil + } else { + if dst.Ports != nil { + if len(src.Ports) > len(dst.Ports) { + if cap(dst.Ports) >= len(src.Ports) { + dst.Ports = (dst.Ports)[:len(src.Ports)] + } else { + dst.Ports = make([]ServicePortConfig, len(src.Ports)) + } + } else if len(src.Ports) < len(dst.Ports) { + dst.Ports = (dst.Ports)[:len(src.Ports)] + } + } else { + dst.Ports = make([]ServicePortConfig, len(src.Ports)) + } + deriveDeepCopy_22(dst.Ports, src.Ports) + } + dst.Privileged = src.Privileged + dst.PullPolicy = src.PullPolicy + dst.ReadOnly = src.ReadOnly + dst.Restart = src.Restart + dst.Runtime = src.Runtime + if src.Scale == nil { + dst.Scale = nil + } else { + dst.Scale = new(int) + *dst.Scale = *src.Scale + } + if src.Secrets == nil { + dst.Secrets = nil + } else { + if dst.Secrets != nil { + if len(src.Secrets) > len(dst.Secrets) { + if cap(dst.Secrets) >= len(src.Secrets) { + dst.Secrets = (dst.Secrets)[:len(src.Secrets)] + } else { + dst.Secrets = make([]ServiceSecretConfig, len(src.Secrets)) + } + } else if len(src.Secrets) < len(dst.Secrets) { + dst.Secrets = (dst.Secrets)[:len(src.Secrets)] + } + } else { + dst.Secrets = make([]ServiceSecretConfig, len(src.Secrets)) + } + deriveDeepCopy_23(dst.Secrets, src.Secrets) + } + if src.SecurityOpt == nil { + dst.SecurityOpt = nil + } else { + if dst.SecurityOpt != nil { + if len(src.SecurityOpt) > len(dst.SecurityOpt) { + if cap(dst.SecurityOpt) >= len(src.SecurityOpt) { + dst.SecurityOpt = (dst.SecurityOpt)[:len(src.SecurityOpt)] + } else { + dst.SecurityOpt = make([]string, 
len(src.SecurityOpt)) + } + } else if len(src.SecurityOpt) < len(dst.SecurityOpt) { + dst.SecurityOpt = (dst.SecurityOpt)[:len(src.SecurityOpt)] + } + } else { + dst.SecurityOpt = make([]string, len(src.SecurityOpt)) + } + copy(dst.SecurityOpt, src.SecurityOpt) + } + dst.ShmSize = src.ShmSize + dst.StdinOpen = src.StdinOpen + if src.StopGracePeriod == nil { + dst.StopGracePeriod = nil + } else { + dst.StopGracePeriod = new(Duration) + *dst.StopGracePeriod = *src.StopGracePeriod + } + dst.StopSignal = src.StopSignal + if src.StorageOpt != nil { + dst.StorageOpt = make(map[string]string, len(src.StorageOpt)) + deriveDeepCopy_5(dst.StorageOpt, src.StorageOpt) + } else { + dst.StorageOpt = nil + } + if src.Sysctls != nil { + dst.Sysctls = make(map[string]string, len(src.Sysctls)) + deriveDeepCopy_5(dst.Sysctls, src.Sysctls) + } else { + dst.Sysctls = nil + } + if src.Tmpfs == nil { + dst.Tmpfs = nil + } else { + if dst.Tmpfs != nil { + if len(src.Tmpfs) > len(dst.Tmpfs) { + if cap(dst.Tmpfs) >= len(src.Tmpfs) { + dst.Tmpfs = (dst.Tmpfs)[:len(src.Tmpfs)] + } else { + dst.Tmpfs = make([]string, len(src.Tmpfs)) + } + } else if len(src.Tmpfs) < len(dst.Tmpfs) { + dst.Tmpfs = (dst.Tmpfs)[:len(src.Tmpfs)] + } + } else { + dst.Tmpfs = make([]string, len(src.Tmpfs)) + } + copy(dst.Tmpfs, src.Tmpfs) + } + dst.Tty = src.Tty + if src.Ulimits != nil { + dst.Ulimits = make(map[string]*UlimitsConfig, len(src.Ulimits)) + deriveDeepCopy_24(dst.Ulimits, src.Ulimits) + } else { + dst.Ulimits = nil + } + dst.UseAPISocket = src.UseAPISocket + dst.User = src.User + dst.UserNSMode = src.UserNSMode + dst.Uts = src.Uts + dst.VolumeDriver = src.VolumeDriver + if src.Volumes == nil { + dst.Volumes = nil + } else { + if dst.Volumes != nil { + if len(src.Volumes) > len(dst.Volumes) { + if cap(dst.Volumes) >= len(src.Volumes) { + dst.Volumes = (dst.Volumes)[:len(src.Volumes)] + } else { + dst.Volumes = make([]ServiceVolumeConfig, len(src.Volumes)) + } + } else if len(src.Volumes) < 
len(dst.Volumes) { + dst.Volumes = (dst.Volumes)[:len(src.Volumes)] + } + } else { + dst.Volumes = make([]ServiceVolumeConfig, len(src.Volumes)) + } + deriveDeepCopy_25(dst.Volumes, src.Volumes) + } + if src.VolumesFrom == nil { + dst.VolumesFrom = nil + } else { + if dst.VolumesFrom != nil { + if len(src.VolumesFrom) > len(dst.VolumesFrom) { + if cap(dst.VolumesFrom) >= len(src.VolumesFrom) { + dst.VolumesFrom = (dst.VolumesFrom)[:len(src.VolumesFrom)] + } else { + dst.VolumesFrom = make([]string, len(src.VolumesFrom)) + } + } else if len(src.VolumesFrom) < len(dst.VolumesFrom) { + dst.VolumesFrom = (dst.VolumesFrom)[:len(src.VolumesFrom)] + } + } else { + dst.VolumesFrom = make([]string, len(src.VolumesFrom)) + } + copy(dst.VolumesFrom, src.VolumesFrom) + } + dst.WorkingDir = src.WorkingDir + if src.PostStart == nil { + dst.PostStart = nil + } else { + if dst.PostStart != nil { + if len(src.PostStart) > len(dst.PostStart) { + if cap(dst.PostStart) >= len(src.PostStart) { + dst.PostStart = (dst.PostStart)[:len(src.PostStart)] + } else { + dst.PostStart = make([]ServiceHook, len(src.PostStart)) + } + } else if len(src.PostStart) < len(dst.PostStart) { + dst.PostStart = (dst.PostStart)[:len(src.PostStart)] + } + } else { + dst.PostStart = make([]ServiceHook, len(src.PostStart)) + } + deriveDeepCopy_26(dst.PostStart, src.PostStart) + } + if src.PreStop == nil { + dst.PreStop = nil + } else { + if dst.PreStop != nil { + if len(src.PreStop) > len(dst.PreStop) { + if cap(dst.PreStop) >= len(src.PreStop) { + dst.PreStop = (dst.PreStop)[:len(src.PreStop)] + } else { + dst.PreStop = make([]ServiceHook, len(src.PreStop)) + } + } else if len(src.PreStop) < len(dst.PreStop) { + dst.PreStop = (dst.PreStop)[:len(src.PreStop)] + } + } else { + dst.PreStop = make([]ServiceHook, len(src.PreStop)) + } + deriveDeepCopy_26(dst.PreStop, src.PreStop) + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + 
} else { + dst.Extensions = nil + } +} + +// deriveDeepCopy recursively copies the contents of src into dst. +func deriveDeepCopy(dst, src map[string]ServiceConfig) { + for src_key, src_value := range src { + func() { + field := new(ServiceConfig) + deriveDeepCopyService(field, &src_value) + dst[src_key] = *field + }() + } +} + +// deriveDeepCopy_ recursively copies the contents of src into dst. +func deriveDeepCopy_(dst, src map[string]NetworkConfig) { + for src_key, src_value := range src { + func() { + field := new(NetworkConfig) + deriveDeepCopy_27(field, &src_value) + dst[src_key] = *field + }() + } +} + +// deriveDeepCopy_1 recursively copies the contents of src into dst. +func deriveDeepCopy_1(dst, src map[string]VolumeConfig) { + for src_key, src_value := range src { + func() { + field := new(VolumeConfig) + deriveDeepCopy_28(field, &src_value) + dst[src_key] = *field + }() + } +} + +// deriveDeepCopy_2 recursively copies the contents of src into dst. +func deriveDeepCopy_2(dst, src map[string]SecretConfig) { + for src_key, src_value := range src { + func() { + field := new(SecretConfig) + deriveDeepCopy_29(field, &src_value) + dst[src_key] = *field + }() + } +} + +// deriveDeepCopy_3 recursively copies the contents of src into dst. +func deriveDeepCopy_3(dst, src map[string]ConfigObjConfig) { + for src_key, src_value := range src { + func() { + field := new(ConfigObjConfig) + deriveDeepCopy_30(field, &src_value) + dst[src_key] = *field + }() + } +} + +// deriveDeepCopy_4 recursively copies the contents of src into dst. +func deriveDeepCopy_4(dst, src map[string]ModelConfig) { + for src_key, src_value := range src { + func() { + field := new(ModelConfig) + deriveDeepCopy_31(field, &src_value) + dst[src_key] = *field + }() + } +} + +// deriveDeepCopy_5 recursively copies the contents of src into dst. 
+func deriveDeepCopy_5(dst, src map[string]string) { + for src_key, src_value := range src { + dst[src_key] = src_value + } +} + +// deriveDeepCopy_6 recursively copies the contents of src into dst. +func deriveDeepCopy_6(dst, src *BuildConfig) { + dst.Context = src.Context + dst.Dockerfile = src.Dockerfile + dst.DockerfileInline = src.DockerfileInline + if src.Entitlements == nil { + dst.Entitlements = nil + } else { + if dst.Entitlements != nil { + if len(src.Entitlements) > len(dst.Entitlements) { + if cap(dst.Entitlements) >= len(src.Entitlements) { + dst.Entitlements = (dst.Entitlements)[:len(src.Entitlements)] + } else { + dst.Entitlements = make([]string, len(src.Entitlements)) + } + } else if len(src.Entitlements) < len(dst.Entitlements) { + dst.Entitlements = (dst.Entitlements)[:len(src.Entitlements)] + } + } else { + dst.Entitlements = make([]string, len(src.Entitlements)) + } + copy(dst.Entitlements, src.Entitlements) + } + if src.Args != nil { + dst.Args = make(map[string]*string, len(src.Args)) + deriveDeepCopy_15(dst.Args, src.Args) + } else { + dst.Args = nil + } + dst.Provenance = src.Provenance + dst.SBOM = src.SBOM + if src.SSH == nil { + dst.SSH = nil + } else { + if dst.SSH != nil { + if len(src.SSH) > len(dst.SSH) { + if cap(dst.SSH) >= len(src.SSH) { + dst.SSH = (dst.SSH)[:len(src.SSH)] + } else { + dst.SSH = make([]SSHKey, len(src.SSH)) + } + } else if len(src.SSH) < len(dst.SSH) { + dst.SSH = (dst.SSH)[:len(src.SSH)] + } + } else { + dst.SSH = make([]SSHKey, len(src.SSH)) + } + copy(dst.SSH, src.SSH) + } + if src.Labels != nil { + dst.Labels = make(map[string]string, len(src.Labels)) + deriveDeepCopy_5(dst.Labels, src.Labels) + } else { + dst.Labels = nil + } + if src.CacheFrom == nil { + dst.CacheFrom = nil + } else { + if dst.CacheFrom != nil { + if len(src.CacheFrom) > len(dst.CacheFrom) { + if cap(dst.CacheFrom) >= len(src.CacheFrom) { + dst.CacheFrom = (dst.CacheFrom)[:len(src.CacheFrom)] + } else { + dst.CacheFrom = make([]string, 
len(src.CacheFrom)) + } + } else if len(src.CacheFrom) < len(dst.CacheFrom) { + dst.CacheFrom = (dst.CacheFrom)[:len(src.CacheFrom)] + } + } else { + dst.CacheFrom = make([]string, len(src.CacheFrom)) + } + copy(dst.CacheFrom, src.CacheFrom) + } + if src.CacheTo == nil { + dst.CacheTo = nil + } else { + if dst.CacheTo != nil { + if len(src.CacheTo) > len(dst.CacheTo) { + if cap(dst.CacheTo) >= len(src.CacheTo) { + dst.CacheTo = (dst.CacheTo)[:len(src.CacheTo)] + } else { + dst.CacheTo = make([]string, len(src.CacheTo)) + } + } else if len(src.CacheTo) < len(dst.CacheTo) { + dst.CacheTo = (dst.CacheTo)[:len(src.CacheTo)] + } + } else { + dst.CacheTo = make([]string, len(src.CacheTo)) + } + copy(dst.CacheTo, src.CacheTo) + } + dst.NoCache = src.NoCache + if src.NoCacheFilter == nil { + dst.NoCacheFilter = nil + } else { + if dst.NoCacheFilter != nil { + if len(src.NoCacheFilter) > len(dst.NoCacheFilter) { + if cap(dst.NoCacheFilter) >= len(src.NoCacheFilter) { + dst.NoCacheFilter = (dst.NoCacheFilter)[:len(src.NoCacheFilter)] + } else { + dst.NoCacheFilter = make([]string, len(src.NoCacheFilter)) + } + } else if len(src.NoCacheFilter) < len(dst.NoCacheFilter) { + dst.NoCacheFilter = (dst.NoCacheFilter)[:len(src.NoCacheFilter)] + } + } else { + dst.NoCacheFilter = make([]string, len(src.NoCacheFilter)) + } + copy(dst.NoCacheFilter, src.NoCacheFilter) + } + if src.AdditionalContexts != nil { + dst.AdditionalContexts = make(map[string]string, len(src.AdditionalContexts)) + deriveDeepCopy_5(dst.AdditionalContexts, src.AdditionalContexts) + } else { + dst.AdditionalContexts = nil + } + dst.Pull = src.Pull + if src.ExtraHosts != nil { + dst.ExtraHosts = make(map[string][]string, len(src.ExtraHosts)) + deriveDeepCopy_16(dst.ExtraHosts, src.ExtraHosts) + } else { + dst.ExtraHosts = nil + } + dst.Isolation = src.Isolation + dst.Network = src.Network + dst.Target = src.Target + if src.Secrets == nil { + dst.Secrets = nil + } else { + if dst.Secrets != nil { + if 
len(src.Secrets) > len(dst.Secrets) { + if cap(dst.Secrets) >= len(src.Secrets) { + dst.Secrets = (dst.Secrets)[:len(src.Secrets)] + } else { + dst.Secrets = make([]ServiceSecretConfig, len(src.Secrets)) + } + } else if len(src.Secrets) < len(dst.Secrets) { + dst.Secrets = (dst.Secrets)[:len(src.Secrets)] + } + } else { + dst.Secrets = make([]ServiceSecretConfig, len(src.Secrets)) + } + deriveDeepCopy_23(dst.Secrets, src.Secrets) + } + dst.ShmSize = src.ShmSize + if src.Tags == nil { + dst.Tags = nil + } else { + if dst.Tags != nil { + if len(src.Tags) > len(dst.Tags) { + if cap(dst.Tags) >= len(src.Tags) { + dst.Tags = (dst.Tags)[:len(src.Tags)] + } else { + dst.Tags = make([]string, len(src.Tags)) + } + } else if len(src.Tags) < len(dst.Tags) { + dst.Tags = (dst.Tags)[:len(src.Tags)] + } + } else { + dst.Tags = make([]string, len(src.Tags)) + } + copy(dst.Tags, src.Tags) + } + if src.Ulimits != nil { + dst.Ulimits = make(map[string]*UlimitsConfig, len(src.Ulimits)) + deriveDeepCopy_24(dst.Ulimits, src.Ulimits) + } else { + dst.Ulimits = nil + } + if src.Platforms == nil { + dst.Platforms = nil + } else { + if dst.Platforms != nil { + if len(src.Platforms) > len(dst.Platforms) { + if cap(dst.Platforms) >= len(src.Platforms) { + dst.Platforms = (dst.Platforms)[:len(src.Platforms)] + } else { + dst.Platforms = make([]string, len(src.Platforms)) + } + } else if len(src.Platforms) < len(dst.Platforms) { + dst.Platforms = (dst.Platforms)[:len(src.Platforms)] + } + } else { + dst.Platforms = make([]string, len(src.Platforms)) + } + copy(dst.Platforms, src.Platforms) + } + dst.Privileged = src.Privileged + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_7 recursively copies the contents of src into dst. 
+func deriveDeepCopy_7(dst, src *DevelopConfig) { + if src.Watch == nil { + dst.Watch = nil + } else { + if dst.Watch != nil { + if len(src.Watch) > len(dst.Watch) { + if cap(dst.Watch) >= len(src.Watch) { + dst.Watch = (dst.Watch)[:len(src.Watch)] + } else { + dst.Watch = make([]Trigger, len(src.Watch)) + } + } else if len(src.Watch) < len(dst.Watch) { + dst.Watch = (dst.Watch)[:len(src.Watch)] + } + } else { + dst.Watch = make([]Trigger, len(src.Watch)) + } + deriveDeepCopy_32(dst.Watch, src.Watch) + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_8 recursively copies the contents of src into dst. +func deriveDeepCopy_8(dst, src *BlkioConfig) { + dst.Weight = src.Weight + if src.WeightDevice == nil { + dst.WeightDevice = nil + } else { + if dst.WeightDevice != nil { + if len(src.WeightDevice) > len(dst.WeightDevice) { + if cap(dst.WeightDevice) >= len(src.WeightDevice) { + dst.WeightDevice = (dst.WeightDevice)[:len(src.WeightDevice)] + } else { + dst.WeightDevice = make([]WeightDevice, len(src.WeightDevice)) + } + } else if len(src.WeightDevice) < len(dst.WeightDevice) { + dst.WeightDevice = (dst.WeightDevice)[:len(src.WeightDevice)] + } + } else { + dst.WeightDevice = make([]WeightDevice, len(src.WeightDevice)) + } + deriveDeepCopy_33(dst.WeightDevice, src.WeightDevice) + } + if src.DeviceReadBps == nil { + dst.DeviceReadBps = nil + } else { + if dst.DeviceReadBps != nil { + if len(src.DeviceReadBps) > len(dst.DeviceReadBps) { + if cap(dst.DeviceReadBps) >= len(src.DeviceReadBps) { + dst.DeviceReadBps = (dst.DeviceReadBps)[:len(src.DeviceReadBps)] + } else { + dst.DeviceReadBps = make([]ThrottleDevice, len(src.DeviceReadBps)) + } + } else if len(src.DeviceReadBps) < len(dst.DeviceReadBps) { + dst.DeviceReadBps = (dst.DeviceReadBps)[:len(src.DeviceReadBps)] + } + } else { + dst.DeviceReadBps = make([]ThrottleDevice, 
len(src.DeviceReadBps)) + } + deriveDeepCopy_34(dst.DeviceReadBps, src.DeviceReadBps) + } + if src.DeviceReadIOps == nil { + dst.DeviceReadIOps = nil + } else { + if dst.DeviceReadIOps != nil { + if len(src.DeviceReadIOps) > len(dst.DeviceReadIOps) { + if cap(dst.DeviceReadIOps) >= len(src.DeviceReadIOps) { + dst.DeviceReadIOps = (dst.DeviceReadIOps)[:len(src.DeviceReadIOps)] + } else { + dst.DeviceReadIOps = make([]ThrottleDevice, len(src.DeviceReadIOps)) + } + } else if len(src.DeviceReadIOps) < len(dst.DeviceReadIOps) { + dst.DeviceReadIOps = (dst.DeviceReadIOps)[:len(src.DeviceReadIOps)] + } + } else { + dst.DeviceReadIOps = make([]ThrottleDevice, len(src.DeviceReadIOps)) + } + deriveDeepCopy_34(dst.DeviceReadIOps, src.DeviceReadIOps) + } + if src.DeviceWriteBps == nil { + dst.DeviceWriteBps = nil + } else { + if dst.DeviceWriteBps != nil { + if len(src.DeviceWriteBps) > len(dst.DeviceWriteBps) { + if cap(dst.DeviceWriteBps) >= len(src.DeviceWriteBps) { + dst.DeviceWriteBps = (dst.DeviceWriteBps)[:len(src.DeviceWriteBps)] + } else { + dst.DeviceWriteBps = make([]ThrottleDevice, len(src.DeviceWriteBps)) + } + } else if len(src.DeviceWriteBps) < len(dst.DeviceWriteBps) { + dst.DeviceWriteBps = (dst.DeviceWriteBps)[:len(src.DeviceWriteBps)] + } + } else { + dst.DeviceWriteBps = make([]ThrottleDevice, len(src.DeviceWriteBps)) + } + deriveDeepCopy_34(dst.DeviceWriteBps, src.DeviceWriteBps) + } + if src.DeviceWriteIOps == nil { + dst.DeviceWriteIOps = nil + } else { + if dst.DeviceWriteIOps != nil { + if len(src.DeviceWriteIOps) > len(dst.DeviceWriteIOps) { + if cap(dst.DeviceWriteIOps) >= len(src.DeviceWriteIOps) { + dst.DeviceWriteIOps = (dst.DeviceWriteIOps)[:len(src.DeviceWriteIOps)] + } else { + dst.DeviceWriteIOps = make([]ThrottleDevice, len(src.DeviceWriteIOps)) + } + } else if len(src.DeviceWriteIOps) < len(dst.DeviceWriteIOps) { + dst.DeviceWriteIOps = (dst.DeviceWriteIOps)[:len(src.DeviceWriteIOps)] + } + } else { + dst.DeviceWriteIOps = 
make([]ThrottleDevice, len(src.DeviceWriteIOps)) + } + deriveDeepCopy_34(dst.DeviceWriteIOps, src.DeviceWriteIOps) + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_9 recursively copies the contents of src into dst. +func deriveDeepCopy_9(dst, src []ServiceConfigObjConfig) { + for src_i, src_value := range src { + func() { + field := new(ServiceConfigObjConfig) + deriveDeepCopy_35(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_10 recursively copies the contents of src into dst. +func deriveDeepCopy_10(dst, src *CredentialSpecConfig) { + dst.Config = src.Config + dst.File = src.File + dst.Registry = src.Registry + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_11 recursively copies the contents of src into dst. +func deriveDeepCopy_11(dst, src map[string]ServiceDependency) { + for src_key, src_value := range src { + func() { + field := new(ServiceDependency) + deriveDeepCopy_36(field, &src_value) + dst[src_key] = *field + }() + } +} + +// deriveDeepCopy_12 recursively copies the contents of src into dst. 
+func deriveDeepCopy_12(dst, src *DeployConfig) { + dst.Mode = src.Mode + if src.Replicas == nil { + dst.Replicas = nil + } else { + dst.Replicas = new(int) + *dst.Replicas = *src.Replicas + } + if src.Labels != nil { + dst.Labels = make(map[string]string, len(src.Labels)) + deriveDeepCopy_5(dst.Labels, src.Labels) + } else { + dst.Labels = nil + } + if src.UpdateConfig == nil { + dst.UpdateConfig = nil + } else { + dst.UpdateConfig = new(UpdateConfig) + deriveDeepCopy_37(dst.UpdateConfig, src.UpdateConfig) + } + if src.RollbackConfig == nil { + dst.RollbackConfig = nil + } else { + dst.RollbackConfig = new(UpdateConfig) + deriveDeepCopy_37(dst.RollbackConfig, src.RollbackConfig) + } + func() { + field := new(Resources) + deriveDeepCopy_38(field, &src.Resources) + dst.Resources = *field + }() + if src.RestartPolicy == nil { + dst.RestartPolicy = nil + } else { + dst.RestartPolicy = new(RestartPolicy) + deriveDeepCopy_39(dst.RestartPolicy, src.RestartPolicy) + } + func() { + field := new(Placement) + deriveDeepCopy_40(field, &src.Placement) + dst.Placement = *field + }() + dst.EndpointMode = src.EndpointMode + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_13 recursively copies the contents of src into dst. +func deriveDeepCopy_13(dst, src []DeviceMapping) { + for src_i, src_value := range src { + func() { + field := new(DeviceMapping) + deriveDeepCopy_41(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_14 recursively copies the contents of src into dst. 
+func deriveDeepCopy_14(dst, src *ServiceProviderConfig) { + dst.Type = src.Type + if src.Options != nil { + dst.Options = make(map[string][]string, len(src.Options)) + deriveDeepCopy_16(dst.Options, src.Options) + } else { + dst.Options = nil + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_15 recursively copies the contents of src into dst. +func deriveDeepCopy_15(dst, src map[string]*string) { + for src_key, src_value := range src { + if src_value == nil { + dst[src_key] = nil + } + if src_value == nil { + dst[src_key] = nil + } else { + dst[src_key] = new(string) + *dst[src_key] = *src_value + } + } +} + +// deriveDeepCopy_16 recursively copies the contents of src into dst. +func deriveDeepCopy_16(dst, src map[string][]string) { + for src_key, src_value := range src { + if src_value == nil { + dst[src_key] = nil + } + if src_value == nil { + dst[src_key] = nil + } else { + if dst[src_key] != nil { + if len(src_value) > len(dst[src_key]) { + if cap(dst[src_key]) >= len(src_value) { + dst[src_key] = (dst[src_key])[:len(src_value)] + } else { + dst[src_key] = make([]string, len(src_value)) + } + } else if len(src_value) < len(dst[src_key]) { + dst[src_key] = (dst[src_key])[:len(src_value)] + } + } else { + dst[src_key] = make([]string, len(src_value)) + } + copy(dst[src_key], src_value) + } + } +} + +// deriveDeepCopy_17 recursively copies the contents of src into dst. +func deriveDeepCopy_17(dst, src []DeviceRequest) { + for src_i, src_value := range src { + func() { + field := new(DeviceRequest) + deriveDeepCopy_42(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_18 recursively copies the contents of src into dst. 
+func deriveDeepCopy_18(dst, src *HealthCheckConfig) { + if src.Test == nil { + dst.Test = nil + } else { + if dst.Test != nil { + if len(src.Test) > len(dst.Test) { + if cap(dst.Test) >= len(src.Test) { + dst.Test = (dst.Test)[:len(src.Test)] + } else { + dst.Test = make([]string, len(src.Test)) + } + } else if len(src.Test) < len(dst.Test) { + dst.Test = (dst.Test)[:len(src.Test)] + } + } else { + dst.Test = make([]string, len(src.Test)) + } + copy(dst.Test, src.Test) + } + if src.Timeout == nil { + dst.Timeout = nil + } else { + dst.Timeout = new(Duration) + *dst.Timeout = *src.Timeout + } + if src.Interval == nil { + dst.Interval = nil + } else { + dst.Interval = new(Duration) + *dst.Interval = *src.Interval + } + if src.Retries == nil { + dst.Retries = nil + } else { + dst.Retries = new(uint64) + *dst.Retries = *src.Retries + } + if src.StartPeriod == nil { + dst.StartPeriod = nil + } else { + dst.StartPeriod = new(Duration) + *dst.StartPeriod = *src.StartPeriod + } + if src.StartInterval == nil { + dst.StartInterval = nil + } else { + dst.StartInterval = new(Duration) + *dst.StartInterval = *src.StartInterval + } + dst.Disable = src.Disable + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_19 recursively copies the contents of src into dst. +func deriveDeepCopy_19(dst, src *LoggingConfig) { + dst.Driver = src.Driver + if src.Options != nil { + dst.Options = make(map[string]string, len(src.Options)) + deriveDeepCopy_5(dst.Options, src.Options) + } else { + dst.Options = nil + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_20 recursively copies the contents of src into dst. 
+func deriveDeepCopy_20(dst, src map[string]*ServiceModelConfig) { + for src_key, src_value := range src { + if src_value == nil { + dst[src_key] = nil + } + if src_value == nil { + dst[src_key] = nil + } else { + dst[src_key] = new(ServiceModelConfig) + deriveDeepCopy_43(dst[src_key], src_value) + } + } +} + +// deriveDeepCopy_21 recursively copies the contents of src into dst. +func deriveDeepCopy_21(dst, src map[string]*ServiceNetworkConfig) { + for src_key, src_value := range src { + if src_value == nil { + dst[src_key] = nil + } + if src_value == nil { + dst[src_key] = nil + } else { + dst[src_key] = new(ServiceNetworkConfig) + deriveDeepCopy_44(dst[src_key], src_value) + } + } +} + +// deriveDeepCopy_22 recursively copies the contents of src into dst. +func deriveDeepCopy_22(dst, src []ServicePortConfig) { + for src_i, src_value := range src { + func() { + field := new(ServicePortConfig) + deriveDeepCopy_45(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_23 recursively copies the contents of src into dst. +func deriveDeepCopy_23(dst, src []ServiceSecretConfig) { + for src_i, src_value := range src { + func() { + field := new(ServiceSecretConfig) + deriveDeepCopy_46(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_24 recursively copies the contents of src into dst. +func deriveDeepCopy_24(dst, src map[string]*UlimitsConfig) { + for src_key, src_value := range src { + if src_value == nil { + dst[src_key] = nil + } + if src_value == nil { + dst[src_key] = nil + } else { + dst[src_key] = new(UlimitsConfig) + deriveDeepCopy_47(dst[src_key], src_value) + } + } +} + +// deriveDeepCopy_25 recursively copies the contents of src into dst. 
+func deriveDeepCopy_25(dst, src []ServiceVolumeConfig) { + for src_i, src_value := range src { + func() { + field := new(ServiceVolumeConfig) + deriveDeepCopy_48(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_26 recursively copies the contents of src into dst. +func deriveDeepCopy_26(dst, src []ServiceHook) { + for src_i, src_value := range src { + func() { + field := new(ServiceHook) + deriveDeepCopy_49(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_27 recursively copies the contents of src into dst. +func deriveDeepCopy_27(dst, src *NetworkConfig) { + dst.Name = src.Name + dst.Driver = src.Driver + if src.DriverOpts != nil { + dst.DriverOpts = make(map[string]string, len(src.DriverOpts)) + deriveDeepCopy_5(dst.DriverOpts, src.DriverOpts) + } else { + dst.DriverOpts = nil + } + func() { + field := new(IPAMConfig) + deriveDeepCopy_50(field, &src.Ipam) + dst.Ipam = *field + }() + dst.External = src.External + dst.Internal = src.Internal + dst.Attachable = src.Attachable + if src.Labels != nil { + dst.Labels = make(map[string]string, len(src.Labels)) + deriveDeepCopy_5(dst.Labels, src.Labels) + } else { + dst.Labels = nil + } + if src.CustomLabels != nil { + dst.CustomLabels = make(map[string]string, len(src.CustomLabels)) + deriveDeepCopy_5(dst.CustomLabels, src.CustomLabels) + } else { + dst.CustomLabels = nil + } + if src.EnableIPv4 == nil { + dst.EnableIPv4 = nil + } else { + dst.EnableIPv4 = new(bool) + *dst.EnableIPv4 = *src.EnableIPv4 + } + if src.EnableIPv6 == nil { + dst.EnableIPv6 = nil + } else { + dst.EnableIPv6 = new(bool) + *dst.EnableIPv6 = *src.EnableIPv6 + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_28 recursively copies the contents of src into dst. 
+func deriveDeepCopy_28(dst, src *VolumeConfig) { + dst.Name = src.Name + dst.Driver = src.Driver + if src.DriverOpts != nil { + dst.DriverOpts = make(map[string]string, len(src.DriverOpts)) + deriveDeepCopy_5(dst.DriverOpts, src.DriverOpts) + } else { + dst.DriverOpts = nil + } + dst.External = src.External + if src.Labels != nil { + dst.Labels = make(map[string]string, len(src.Labels)) + deriveDeepCopy_5(dst.Labels, src.Labels) + } else { + dst.Labels = nil + } + if src.CustomLabels != nil { + dst.CustomLabels = make(map[string]string, len(src.CustomLabels)) + deriveDeepCopy_5(dst.CustomLabels, src.CustomLabels) + } else { + dst.CustomLabels = nil + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_29 recursively copies the contents of src into dst. +func deriveDeepCopy_29(dst, src *SecretConfig) { + dst.Name = src.Name + dst.File = src.File + dst.Environment = src.Environment + dst.Content = src.Content + dst.marshallContent = src.marshallContent + dst.External = src.External + if src.Labels != nil { + dst.Labels = make(map[string]string, len(src.Labels)) + deriveDeepCopy_5(dst.Labels, src.Labels) + } else { + dst.Labels = nil + } + dst.Driver = src.Driver + if src.DriverOpts != nil { + dst.DriverOpts = make(map[string]string, len(src.DriverOpts)) + deriveDeepCopy_5(dst.DriverOpts, src.DriverOpts) + } else { + dst.DriverOpts = nil + } + dst.TemplateDriver = src.TemplateDriver + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_30 recursively copies the contents of src into dst. 
+func deriveDeepCopy_30(dst, src *ConfigObjConfig) { + dst.Name = src.Name + dst.File = src.File + dst.Environment = src.Environment + dst.Content = src.Content + dst.marshallContent = src.marshallContent + dst.External = src.External + if src.Labels != nil { + dst.Labels = make(map[string]string, len(src.Labels)) + deriveDeepCopy_5(dst.Labels, src.Labels) + } else { + dst.Labels = nil + } + dst.Driver = src.Driver + if src.DriverOpts != nil { + dst.DriverOpts = make(map[string]string, len(src.DriverOpts)) + deriveDeepCopy_5(dst.DriverOpts, src.DriverOpts) + } else { + dst.DriverOpts = nil + } + dst.TemplateDriver = src.TemplateDriver + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_31 recursively copies the contents of src into dst. +func deriveDeepCopy_31(dst, src *ModelConfig) { + dst.Name = src.Name + dst.Model = src.Model + dst.ContextSize = src.ContextSize + if src.RuntimeFlags == nil { + dst.RuntimeFlags = nil + } else { + if dst.RuntimeFlags != nil { + if len(src.RuntimeFlags) > len(dst.RuntimeFlags) { + if cap(dst.RuntimeFlags) >= len(src.RuntimeFlags) { + dst.RuntimeFlags = (dst.RuntimeFlags)[:len(src.RuntimeFlags)] + } else { + dst.RuntimeFlags = make([]string, len(src.RuntimeFlags)) + } + } else if len(src.RuntimeFlags) < len(dst.RuntimeFlags) { + dst.RuntimeFlags = (dst.RuntimeFlags)[:len(src.RuntimeFlags)] + } + } else { + dst.RuntimeFlags = make([]string, len(src.RuntimeFlags)) + } + copy(dst.RuntimeFlags, src.RuntimeFlags) + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_32 recursively copies the contents of src into dst. 
+func deriveDeepCopy_32(dst, src []Trigger) { + for src_i, src_value := range src { + func() { + field := new(Trigger) + deriveDeepCopy_51(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_33 recursively copies the contents of src into dst. +func deriveDeepCopy_33(dst, src []WeightDevice) { + for src_i, src_value := range src { + func() { + field := new(WeightDevice) + deriveDeepCopy_52(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_34 recursively copies the contents of src into dst. +func deriveDeepCopy_34(dst, src []ThrottleDevice) { + for src_i, src_value := range src { + func() { + field := new(ThrottleDevice) + deriveDeepCopy_53(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_35 recursively copies the contents of src into dst. +func deriveDeepCopy_35(dst, src *ServiceConfigObjConfig) { + dst.Source = src.Source + dst.Target = src.Target + dst.UID = src.UID + dst.GID = src.GID + if src.Mode == nil { + dst.Mode = nil + } else { + dst.Mode = new(FileMode) + *dst.Mode = *src.Mode + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_36 recursively copies the contents of src into dst. +func deriveDeepCopy_36(dst, src *ServiceDependency) { + dst.Condition = src.Condition + dst.Restart = src.Restart + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } + dst.Required = src.Required +} + +// deriveDeepCopy_37 recursively copies the contents of src into dst. 
+func deriveDeepCopy_37(dst, src *UpdateConfig) { + if src.Parallelism == nil { + dst.Parallelism = nil + } else { + dst.Parallelism = new(uint64) + *dst.Parallelism = *src.Parallelism + } + dst.Delay = src.Delay + dst.FailureAction = src.FailureAction + dst.Monitor = src.Monitor + dst.MaxFailureRatio = src.MaxFailureRatio + dst.Order = src.Order + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_38 recursively copies the contents of src into dst. +func deriveDeepCopy_38(dst, src *Resources) { + if src.Limits == nil { + dst.Limits = nil + } else { + dst.Limits = new(Resource) + deriveDeepCopy_54(dst.Limits, src.Limits) + } + if src.Reservations == nil { + dst.Reservations = nil + } else { + dst.Reservations = new(Resource) + deriveDeepCopy_54(dst.Reservations, src.Reservations) + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_39 recursively copies the contents of src into dst. +func deriveDeepCopy_39(dst, src *RestartPolicy) { + dst.Condition = src.Condition + if src.Delay == nil { + dst.Delay = nil + } else { + dst.Delay = new(Duration) + *dst.Delay = *src.Delay + } + if src.MaxAttempts == nil { + dst.MaxAttempts = nil + } else { + dst.MaxAttempts = new(uint64) + *dst.MaxAttempts = *src.MaxAttempts + } + if src.Window == nil { + dst.Window = nil + } else { + dst.Window = new(Duration) + *dst.Window = *src.Window + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_40 recursively copies the contents of src into dst. 
+func deriveDeepCopy_40(dst, src *Placement) { + if src.Constraints == nil { + dst.Constraints = nil + } else { + if dst.Constraints != nil { + if len(src.Constraints) > len(dst.Constraints) { + if cap(dst.Constraints) >= len(src.Constraints) { + dst.Constraints = (dst.Constraints)[:len(src.Constraints)] + } else { + dst.Constraints = make([]string, len(src.Constraints)) + } + } else if len(src.Constraints) < len(dst.Constraints) { + dst.Constraints = (dst.Constraints)[:len(src.Constraints)] + } + } else { + dst.Constraints = make([]string, len(src.Constraints)) + } + copy(dst.Constraints, src.Constraints) + } + if src.Preferences == nil { + dst.Preferences = nil + } else { + if dst.Preferences != nil { + if len(src.Preferences) > len(dst.Preferences) { + if cap(dst.Preferences) >= len(src.Preferences) { + dst.Preferences = (dst.Preferences)[:len(src.Preferences)] + } else { + dst.Preferences = make([]PlacementPreferences, len(src.Preferences)) + } + } else if len(src.Preferences) < len(dst.Preferences) { + dst.Preferences = (dst.Preferences)[:len(src.Preferences)] + } + } else { + dst.Preferences = make([]PlacementPreferences, len(src.Preferences)) + } + deriveDeepCopy_55(dst.Preferences, src.Preferences) + } + dst.MaxReplicas = src.MaxReplicas + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_41 recursively copies the contents of src into dst. +func deriveDeepCopy_41(dst, src *DeviceMapping) { + dst.Source = src.Source + dst.Target = src.Target + dst.Permissions = src.Permissions + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_42 recursively copies the contents of src into dst. 
+func deriveDeepCopy_42(dst, src *DeviceRequest) { + if src.Capabilities == nil { + dst.Capabilities = nil + } else { + if dst.Capabilities != nil { + if len(src.Capabilities) > len(dst.Capabilities) { + if cap(dst.Capabilities) >= len(src.Capabilities) { + dst.Capabilities = (dst.Capabilities)[:len(src.Capabilities)] + } else { + dst.Capabilities = make([]string, len(src.Capabilities)) + } + } else if len(src.Capabilities) < len(dst.Capabilities) { + dst.Capabilities = (dst.Capabilities)[:len(src.Capabilities)] + } + } else { + dst.Capabilities = make([]string, len(src.Capabilities)) + } + copy(dst.Capabilities, src.Capabilities) + } + dst.Driver = src.Driver + dst.Count = src.Count + if src.IDs == nil { + dst.IDs = nil + } else { + if dst.IDs != nil { + if len(src.IDs) > len(dst.IDs) { + if cap(dst.IDs) >= len(src.IDs) { + dst.IDs = (dst.IDs)[:len(src.IDs)] + } else { + dst.IDs = make([]string, len(src.IDs)) + } + } else if len(src.IDs) < len(dst.IDs) { + dst.IDs = (dst.IDs)[:len(src.IDs)] + } + } else { + dst.IDs = make([]string, len(src.IDs)) + } + copy(dst.IDs, src.IDs) + } + if src.Options != nil { + dst.Options = make(map[string]string, len(src.Options)) + deriveDeepCopy_5(dst.Options, src.Options) + } else { + dst.Options = nil + } +} + +// deriveDeepCopy_43 recursively copies the contents of src into dst. +func deriveDeepCopy_43(dst, src *ServiceModelConfig) { + dst.EndpointVariable = src.EndpointVariable + dst.ModelVariable = src.ModelVariable + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_44 recursively copies the contents of src into dst. 
+func deriveDeepCopy_44(dst, src *ServiceNetworkConfig) { + if src.Aliases == nil { + dst.Aliases = nil + } else { + if dst.Aliases != nil { + if len(src.Aliases) > len(dst.Aliases) { + if cap(dst.Aliases) >= len(src.Aliases) { + dst.Aliases = (dst.Aliases)[:len(src.Aliases)] + } else { + dst.Aliases = make([]string, len(src.Aliases)) + } + } else if len(src.Aliases) < len(dst.Aliases) { + dst.Aliases = (dst.Aliases)[:len(src.Aliases)] + } + } else { + dst.Aliases = make([]string, len(src.Aliases)) + } + copy(dst.Aliases, src.Aliases) + } + if src.DriverOpts != nil { + dst.DriverOpts = make(map[string]string, len(src.DriverOpts)) + deriveDeepCopy_5(dst.DriverOpts, src.DriverOpts) + } else { + dst.DriverOpts = nil + } + dst.GatewayPriority = src.GatewayPriority + dst.InterfaceName = src.InterfaceName + dst.Ipv4Address = src.Ipv4Address + dst.Ipv6Address = src.Ipv6Address + if src.LinkLocalIPs == nil { + dst.LinkLocalIPs = nil + } else { + if dst.LinkLocalIPs != nil { + if len(src.LinkLocalIPs) > len(dst.LinkLocalIPs) { + if cap(dst.LinkLocalIPs) >= len(src.LinkLocalIPs) { + dst.LinkLocalIPs = (dst.LinkLocalIPs)[:len(src.LinkLocalIPs)] + } else { + dst.LinkLocalIPs = make([]string, len(src.LinkLocalIPs)) + } + } else if len(src.LinkLocalIPs) < len(dst.LinkLocalIPs) { + dst.LinkLocalIPs = (dst.LinkLocalIPs)[:len(src.LinkLocalIPs)] + } + } else { + dst.LinkLocalIPs = make([]string, len(src.LinkLocalIPs)) + } + copy(dst.LinkLocalIPs, src.LinkLocalIPs) + } + dst.MacAddress = src.MacAddress + dst.Priority = src.Priority + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_45 recursively copies the contents of src into dst. 
+func deriveDeepCopy_45(dst, src *ServicePortConfig) { + dst.Name = src.Name + dst.Mode = src.Mode + dst.HostIP = src.HostIP + dst.Target = src.Target + dst.Published = src.Published + dst.Protocol = src.Protocol + dst.AppProtocol = src.AppProtocol + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_46 recursively copies the contents of src into dst. +func deriveDeepCopy_46(dst, src *ServiceSecretConfig) { + dst.Source = src.Source + dst.Target = src.Target + dst.UID = src.UID + dst.GID = src.GID + if src.Mode == nil { + dst.Mode = nil + } else { + dst.Mode = new(FileMode) + *dst.Mode = *src.Mode + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_47 recursively copies the contents of src into dst. +func deriveDeepCopy_47(dst, src *UlimitsConfig) { + dst.Single = src.Single + dst.Soft = src.Soft + dst.Hard = src.Hard + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_48 recursively copies the contents of src into dst. 
+func deriveDeepCopy_48(dst, src *ServiceVolumeConfig) { + dst.Type = src.Type + dst.Source = src.Source + dst.Target = src.Target + dst.ReadOnly = src.ReadOnly + dst.Consistency = src.Consistency + if src.Bind == nil { + dst.Bind = nil + } else { + dst.Bind = new(ServiceVolumeBind) + deriveDeepCopy_56(dst.Bind, src.Bind) + } + if src.Volume == nil { + dst.Volume = nil + } else { + dst.Volume = new(ServiceVolumeVolume) + deriveDeepCopy_57(dst.Volume, src.Volume) + } + if src.Tmpfs == nil { + dst.Tmpfs = nil + } else { + dst.Tmpfs = new(ServiceVolumeTmpfs) + deriveDeepCopy_58(dst.Tmpfs, src.Tmpfs) + } + if src.Image == nil { + dst.Image = nil + } else { + dst.Image = new(ServiceVolumeImage) + deriveDeepCopy_59(dst.Image, src.Image) + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_49 recursively copies the contents of src into dst. +func deriveDeepCopy_49(dst, src *ServiceHook) { + if src.Command == nil { + dst.Command = nil + } else { + if dst.Command != nil { + if len(src.Command) > len(dst.Command) { + if cap(dst.Command) >= len(src.Command) { + dst.Command = (dst.Command)[:len(src.Command)] + } else { + dst.Command = make([]string, len(src.Command)) + } + } else if len(src.Command) < len(dst.Command) { + dst.Command = (dst.Command)[:len(src.Command)] + } + } else { + dst.Command = make([]string, len(src.Command)) + } + copy(dst.Command, src.Command) + } + dst.User = src.User + dst.Privileged = src.Privileged + dst.WorkingDir = src.WorkingDir + if src.Environment != nil { + dst.Environment = make(map[string]*string, len(src.Environment)) + deriveDeepCopy_15(dst.Environment, src.Environment) + } else { + dst.Environment = nil + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// 
deriveDeepCopy_50 recursively copies the contents of src into dst. +func deriveDeepCopy_50(dst, src *IPAMConfig) { + dst.Driver = src.Driver + if src.Config == nil { + dst.Config = nil + } else { + if dst.Config != nil { + if len(src.Config) > len(dst.Config) { + if cap(dst.Config) >= len(src.Config) { + dst.Config = (dst.Config)[:len(src.Config)] + } else { + dst.Config = make([]*IPAMPool, len(src.Config)) + } + } else if len(src.Config) < len(dst.Config) { + dst.Config = (dst.Config)[:len(src.Config)] + } + } else { + dst.Config = make([]*IPAMPool, len(src.Config)) + } + deriveDeepCopy_60(dst.Config, src.Config) + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_51 recursively copies the contents of src into dst. +func deriveDeepCopy_51(dst, src *Trigger) { + dst.Path = src.Path + dst.Action = src.Action + dst.Target = src.Target + func() { + field := new(ServiceHook) + deriveDeepCopy_49(field, &src.Exec) + dst.Exec = *field + }() + if src.Include == nil { + dst.Include = nil + } else { + if dst.Include != nil { + if len(src.Include) > len(dst.Include) { + if cap(dst.Include) >= len(src.Include) { + dst.Include = (dst.Include)[:len(src.Include)] + } else { + dst.Include = make([]string, len(src.Include)) + } + } else if len(src.Include) < len(dst.Include) { + dst.Include = (dst.Include)[:len(src.Include)] + } + } else { + dst.Include = make([]string, len(src.Include)) + } + copy(dst.Include, src.Include) + } + if src.Ignore == nil { + dst.Ignore = nil + } else { + if dst.Ignore != nil { + if len(src.Ignore) > len(dst.Ignore) { + if cap(dst.Ignore) >= len(src.Ignore) { + dst.Ignore = (dst.Ignore)[:len(src.Ignore)] + } else { + dst.Ignore = make([]string, len(src.Ignore)) + } + } else if len(src.Ignore) < len(dst.Ignore) { + dst.Ignore = (dst.Ignore)[:len(src.Ignore)] + } + } else { + dst.Ignore = make([]string, 
len(src.Ignore)) + } + copy(dst.Ignore, src.Ignore) + } + dst.InitialSync = src.InitialSync + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_52 recursively copies the contents of src into dst. +func deriveDeepCopy_52(dst, src *WeightDevice) { + dst.Path = src.Path + dst.Weight = src.Weight + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_53 recursively copies the contents of src into dst. +func deriveDeepCopy_53(dst, src *ThrottleDevice) { + dst.Path = src.Path + dst.Rate = src.Rate + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_54 recursively copies the contents of src into dst. 
+func deriveDeepCopy_54(dst, src *Resource) { + dst.NanoCPUs = src.NanoCPUs + dst.MemoryBytes = src.MemoryBytes + dst.Pids = src.Pids + if src.Devices == nil { + dst.Devices = nil + } else { + if dst.Devices != nil { + if len(src.Devices) > len(dst.Devices) { + if cap(dst.Devices) >= len(src.Devices) { + dst.Devices = (dst.Devices)[:len(src.Devices)] + } else { + dst.Devices = make([]DeviceRequest, len(src.Devices)) + } + } else if len(src.Devices) < len(dst.Devices) { + dst.Devices = (dst.Devices)[:len(src.Devices)] + } + } else { + dst.Devices = make([]DeviceRequest, len(src.Devices)) + } + deriveDeepCopy_17(dst.Devices, src.Devices) + } + if src.GenericResources == nil { + dst.GenericResources = nil + } else { + if dst.GenericResources != nil { + if len(src.GenericResources) > len(dst.GenericResources) { + if cap(dst.GenericResources) >= len(src.GenericResources) { + dst.GenericResources = (dst.GenericResources)[:len(src.GenericResources)] + } else { + dst.GenericResources = make([]GenericResource, len(src.GenericResources)) + } + } else if len(src.GenericResources) < len(dst.GenericResources) { + dst.GenericResources = (dst.GenericResources)[:len(src.GenericResources)] + } + } else { + dst.GenericResources = make([]GenericResource, len(src.GenericResources)) + } + deriveDeepCopy_61(dst.GenericResources, src.GenericResources) + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_55 recursively copies the contents of src into dst. +func deriveDeepCopy_55(dst, src []PlacementPreferences) { + for src_i, src_value := range src { + func() { + field := new(PlacementPreferences) + deriveDeepCopy_62(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_56 recursively copies the contents of src into dst. 
+func deriveDeepCopy_56(dst, src *ServiceVolumeBind) { + dst.SELinux = src.SELinux + dst.Propagation = src.Propagation + dst.CreateHostPath = src.CreateHostPath + dst.Recursive = src.Recursive + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_57 recursively copies the contents of src into dst. +func deriveDeepCopy_57(dst, src *ServiceVolumeVolume) { + if src.Labels != nil { + dst.Labels = make(map[string]string, len(src.Labels)) + deriveDeepCopy_5(dst.Labels, src.Labels) + } else { + dst.Labels = nil + } + dst.NoCopy = src.NoCopy + dst.Subpath = src.Subpath + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_58 recursively copies the contents of src into dst. +func deriveDeepCopy_58(dst, src *ServiceVolumeTmpfs) { + dst.Size = src.Size + dst.Mode = src.Mode + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_59 recursively copies the contents of src into dst. +func deriveDeepCopy_59(dst, src *ServiceVolumeImage) { + dst.SubPath = src.SubPath + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_60 recursively copies the contents of src into dst. +func deriveDeepCopy_60(dst, src []*IPAMPool) { + for src_i, src_value := range src { + if src_value == nil { + dst[src_i] = nil + } else { + dst[src_i] = new(IPAMPool) + deriveDeepCopy_63(dst[src_i], src_value) + } + } +} + +// deriveDeepCopy_61 recursively copies the contents of src into dst. 
+func deriveDeepCopy_61(dst, src []GenericResource) { + for src_i, src_value := range src { + func() { + field := new(GenericResource) + deriveDeepCopy_64(field, &src_value) + dst[src_i] = *field + }() + } +} + +// deriveDeepCopy_62 recursively copies the contents of src into dst. +func deriveDeepCopy_62(dst, src *PlacementPreferences) { + dst.Spread = src.Spread + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_63 recursively copies the contents of src into dst. +func deriveDeepCopy_63(dst, src *IPAMPool) { + dst.Subnet = src.Subnet + dst.Gateway = src.Gateway + dst.IPRange = src.IPRange + if src.AuxiliaryAddresses != nil { + dst.AuxiliaryAddresses = make(map[string]string, len(src.AuxiliaryAddresses)) + deriveDeepCopy_5(dst.AuxiliaryAddresses, src.AuxiliaryAddresses) + } else { + dst.AuxiliaryAddresses = nil + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_64 recursively copies the contents of src into dst. +func deriveDeepCopy_64(dst, src *GenericResource) { + if src.DiscreteResourceSpec == nil { + dst.DiscreteResourceSpec = nil + } else { + dst.DiscreteResourceSpec = new(DiscreteGenericResource) + deriveDeepCopy_65(dst.DiscreteResourceSpec, src.DiscreteResourceSpec) + } + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} + +// deriveDeepCopy_65 recursively copies the contents of src into dst. 
+func deriveDeepCopy_65(dst, src *DiscreteGenericResource) { + dst.Kind = src.Kind + dst.Value = src.Value + if src.Extensions != nil { + dst.Extensions = make(map[string]any, len(src.Extensions)) + src.Extensions.DeepCopy(dst.Extensions) + } else { + dst.Extensions = nil + } +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/develop.go b/vendor/github.com/compose-spec/compose-go/v2/types/develop.go new file mode 100644 index 00000000..d468a8c0 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/develop.go @@ -0,0 +1,44 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +type DevelopConfig struct { + Watch []Trigger `yaml:"watch,omitempty" json:"watch,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +type WatchAction string + +const ( + WatchActionSync WatchAction = "sync" + WatchActionRebuild WatchAction = "rebuild" + WatchActionRestart WatchAction = "restart" + WatchActionSyncRestart WatchAction = "sync+restart" + WatchActionSyncExec WatchAction = "sync+exec" +) + +type Trigger struct { + Path string `yaml:"path" json:"path"` + Action WatchAction `yaml:"action" json:"action"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + Exec ServiceHook `yaml:"exec,omitempty" json:"exec,omitempty"` + Include []string `yaml:"include,omitempty" json:"include,omitempty"` + Ignore []string `yaml:"ignore,omitempty" json:"ignore,omitempty"` + InitialSync bool `yaml:"initial_sync,omitempty" json:"initial_sync,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/device.go b/vendor/github.com/compose-spec/compose-go/v2/types/device.go new file mode 100644 index 00000000..5b30cc0c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/device.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" + "strconv" + "strings" +) + +type DeviceRequest struct { + Capabilities []string `yaml:"capabilities,omitempty" json:"capabilities,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + Count DeviceCount `yaml:"count,omitempty" json:"count,omitempty"` + IDs []string `yaml:"device_ids,omitempty" json:"device_ids,omitempty"` + Options Mapping `yaml:"options,omitempty" json:"options,omitempty"` +} + +type DeviceCount int64 + +func (c *DeviceCount) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case int: + *c = DeviceCount(v) + case string: + if strings.ToLower(v) == "all" { + *c = -1 + return nil + } + i, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return fmt.Errorf("invalid value %q, the only value allowed is 'all' or a number", v) + } + *c = DeviceCount(i) + default: + return fmt.Errorf("invalid type %T for device count", v) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/duration.go b/vendor/github.com/compose-spec/compose-go/v2/types/duration.go new file mode 100644 index 00000000..c1c39730 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/duration.go @@ -0,0 +1,62 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/xhit/go-str2duration/v2" +) + +// Duration is a thin wrapper around time.Duration with improved JSON marshalling +type Duration time.Duration + +func (d Duration) String() string { + return time.Duration(d).String() +} + +func (d *Duration) DecodeMapstructure(value interface{}) error { + v, err := str2duration.ParseDuration(fmt.Sprint(value)) + if err != nil { + return err + } + *d = Duration(v) + return nil +} + +// MarshalJSON makes Duration implement json.Marshaler +func (d Duration) MarshalJSON() ([]byte, error) { + return json.Marshal(d.String()) +} + +// MarshalYAML makes Duration implement yaml.Marshaler +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +func (d *Duration) UnmarshalJSON(b []byte) error { + s := strings.Trim(string(b), "\"") + timeDuration, err := time.ParseDuration(s) + if err != nil { + return err + } + *d = Duration(timeDuration) + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go b/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go new file mode 100644 index 00000000..a7d239ee --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/envfile.go @@ -0,0 +1,23 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +type EnvFile struct { + Path string `yaml:"path,omitempty" json:"path,omitempty"` + Required OptOut `yaml:"required,omitempty" json:"required,omitzero"` + Format string `yaml:"format,omitempty" json:"format,omitempty"` +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go b/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go new file mode 100644 index 00000000..c6c3b37e --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/healthcheck.go @@ -0,0 +1,53 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" +) + +// HealthCheckConfig the healthcheck configuration for a service +type HealthCheckConfig struct { + Test HealthCheckTest `yaml:"test,omitempty" json:"test,omitempty"` + Timeout *Duration `yaml:"timeout,omitempty" json:"timeout,omitempty"` + Interval *Duration `yaml:"interval,omitempty" json:"interval,omitempty"` + Retries *uint64 `yaml:"retries,omitempty" json:"retries,omitempty"` + StartPeriod *Duration `yaml:"start_period,omitempty" json:"start_period,omitempty"` + StartInterval *Duration `yaml:"start_interval,omitempty" json:"start_interval,omitempty"` + Disable bool `yaml:"disable,omitempty" json:"disable,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// HealthCheckTest is the command run to test the health of a service +type HealthCheckTest []string + +func (l *HealthCheckTest) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + *l = []string{"CMD-SHELL", v} + case []interface{}: + seq := make([]string, len(v)) + for i, e := range v { + seq[i] = e.(string) + } + *l = seq + default: + return fmt.Errorf("unexpected value type %T for healthcheck.test", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/hooks.go b/vendor/github.com/compose-spec/compose-go/v2/types/hooks.go new file mode 100644 index 00000000..4c58c094 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/hooks.go @@ -0,0 +1,28 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +// ServiceHook is a command to exec inside container by some lifecycle events +type ServiceHook struct { + Command ShellCommand `yaml:"command,omitempty" json:"command"` + User string `yaml:"user,omitempty" json:"user,omitempty"` + Privileged bool `yaml:"privileged,omitempty" json:"privileged,omitempty"` + WorkingDir string `yaml:"working_dir,omitempty" json:"working_dir,omitempty"` + Environment MappingWithEquals `yaml:"environment,omitempty" json:"environment,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go b/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go new file mode 100644 index 00000000..9bc0fbc5 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/hostList.go @@ -0,0 +1,144 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// HostsList is a list of colon-separated host-ip mappings +type HostsList map[string][]string + +// NewHostsList creates a HostsList from a list of `host=ip` strings +func NewHostsList(hosts []string) (HostsList, error) { + list := HostsList{} + for _, s := range hosts { + var found bool + for _, sep := range hostListSerapators { + host, ip, ok := strings.Cut(s, sep) + if ok { + // Mapping found with this separator, stop here. + if ips, ok := list[host]; ok { + list[host] = append(ips, strings.Split(ip, ",")...) + } else { + list[host] = strings.Split(ip, ",") + } + found = true + break + } + } + if !found { + return nil, fmt.Errorf("invalid additional host, missing IP: %s", s) + } + } + err := list.cleanup() + return list, err +} + +// AsList returns host-ip mappings as a list of strings, using the given +// separator. The Docker Engine API expects ':' separators, the original format +// for '--add-hosts'. But an '=' separator is used in YAML/JSON renderings to +// make IPv6 addresses more readable (for example "my-host=::1" instead of +// "my-host:::1"). 
+func (h HostsList) AsList(sep string) []string { + l := make([]string, 0, len(h)) + for k, v := range h { + for _, ip := range v { + l = append(l, fmt.Sprintf("%s%s%s", k, sep, ip)) + } + } + return l +} + +func (h HostsList) MarshalYAML() (interface{}, error) { + list := h.AsList("=") + sort.Strings(list) + return list, nil +} + +func (h HostsList) MarshalJSON() ([]byte, error) { + list := h.AsList("=") + sort.Strings(list) + return json.Marshal(list) +} + +var hostListSerapators = []string{"=", ":"} + +func (h *HostsList) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + list := make(HostsList, len(v)) + for i, e := range v { + if e == nil { + e = "" + } + switch t := e.(type) { + case string: + list[i] = []string{t} + case []any: + hosts := make([]string, len(t)) + for j, h := range t { + hosts[j] = fmt.Sprint(h) + } + list[i] = hosts + default: + return fmt.Errorf("unexpected value type %T for extra_hosts entry", value) + } + } + err := list.cleanup() + if err != nil { + return err + } + *h = list + return nil + case []interface{}: + s := make([]string, len(v)) + for i, e := range v { + s[i] = fmt.Sprint(e) + } + list, err := NewHostsList(s) + if err != nil { + return err + } + *h = list + return nil + default: + return fmt.Errorf("unexpected value type %T for extra_hosts", value) + } +} + +func (h HostsList) cleanup() error { + for host, ips := range h { + // Check that there is a hostname and that it doesn't contain either + // of the allowed separators, to generate a clearer error than the + // engine would do if it splits the string differently. + if host == "" || strings.ContainsAny(host, ":=") { + return fmt.Errorf("bad host name '%s'", host) + } + for i, ip := range ips { + // Remove brackets from IP addresses (for example "[::1]" -> "::1"). 
+ if len(ip) > 2 && ip[0] == '[' && ip[len(ip)-1] == ']' { + ips[i] = ip[1 : len(ip)-1] + } + } + h[host] = ips + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/labels.go b/vendor/github.com/compose-spec/compose-go/v2/types/labels.go new file mode 100644 index 00000000..7ea5edc4 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/labels.go @@ -0,0 +1,95 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "fmt" + "strings" +) + +// Labels is a mapping type for labels +type Labels map[string]string + +func NewLabelsFromMappingWithEquals(mapping MappingWithEquals) Labels { + labels := Labels{} + for k, v := range mapping { + if v != nil { + labels[k] = *v + } + } + return labels +} + +func (l Labels) Add(key, value string) Labels { + if l == nil { + l = Labels{} + } + l[key] = value + return l +} + +func (l Labels) AsList() []string { + s := make([]string, len(l)) + i := 0 + for k, v := range l { + s[i] = fmt.Sprintf("%s=%s", k, v) + i++ + } + return s +} + +func (l Labels) ToMappingWithEquals() MappingWithEquals { + mapping := MappingWithEquals{} + for k, v := range l { + mapping[k] = &v + } + return mapping +} + +// label value can be a string | number | boolean | null (empty) +func labelValue(e interface{}) string { + if e == nil { + return "" + } + switch v := e.(type) { + case string: + return v + default: + return fmt.Sprint(v) + } +} + +func (l *Labels) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + labels := make(map[string]string, len(v)) + for k, e := range v { + labels[k] = labelValue(e) + } + *l = labels + case []interface{}: + labels := make(map[string]string, len(v)) + for _, s := range v { + k, e, _ := strings.Cut(fmt.Sprint(s), "=") + labels[k] = labelValue(e) + } + *l = labels + default: + return fmt.Errorf("unexpected value type %T for labels", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go b/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go new file mode 100644 index 00000000..fb14974f --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/mapping.go @@ -0,0 +1,230 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "fmt" + "sort" + "strings" + "unicode" +) + +// MappingWithEquals is a mapping type that can be converted from a list of +// key[=value] strings. +// For the key with an empty value (`key=`), the mapped value is set to a pointer to `""`. +// For the key without value (`key`), the mapped value is set to nil. +type MappingWithEquals map[string]*string + +// NewMappingWithEquals build a new Mapping from a set of KEY=VALUE strings +func NewMappingWithEquals(values []string) MappingWithEquals { + mapping := MappingWithEquals{} + for _, env := range values { + tokens := strings.SplitN(env, "=", 2) + if len(tokens) > 1 { + mapping[tokens[0]] = &tokens[1] + } else { + mapping[env] = nil + } + } + return mapping +} + +// OverrideBy update MappingWithEquals with values from another MappingWithEquals +func (m MappingWithEquals) OverrideBy(other MappingWithEquals) MappingWithEquals { + for k, v := range other { + m[k] = v + } + return m +} + +// Resolve update a MappingWithEquals for keys without value (`key`, but not `key=`) +func (m MappingWithEquals) Resolve(lookupFn func(string) (string, bool)) MappingWithEquals { + for k, v := range m { + if v == nil { + if value, ok := lookupFn(k); ok { + m[k] = &value + } + } + } + return m +} + +// RemoveEmpty excludes keys that are not associated with a value +func (m MappingWithEquals) RemoveEmpty() MappingWithEquals { + for k, v := range m { + if v == nil { + delete(m, k) + } + } + return m +} + +func (m MappingWithEquals) ToMapping() Mapping { + o := Mapping{} + for k, v := range m { + if v != nil 
{ + o[k] = *v + } + } + return o +} + +func (m *MappingWithEquals) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + mapping := make(MappingWithEquals, len(v)) + for k, e := range v { + mapping[k] = mappingValue(e) + } + *m = mapping + case []interface{}: + mapping := make(MappingWithEquals, len(v)) + for _, s := range v { + k, e, ok := strings.Cut(fmt.Sprint(s), "=") + if k != "" && unicode.IsSpace(rune(k[len(k)-1])) { + return fmt.Errorf("environment variable %s is declared with a trailing space", k) + } + if !ok { + mapping[k] = nil + } else { + mapping[k] = mappingValue(e) + } + } + *m = mapping + default: + return fmt.Errorf("unexpected value type %T for mapping", value) + } + return nil +} + +// label value can be a string | number | boolean | null +func mappingValue(e interface{}) *string { + if e == nil { + return nil + } + switch v := e.(type) { + case string: + return &v + default: + s := fmt.Sprint(v) + return &s + } +} + +// Mapping is a mapping type that can be converted from a list of +// key[=value] strings. +// For the key with an empty value (`key=`), or key without value (`key`), the +// mapped value is set to an empty string `""`. 
+type Mapping map[string]string + +// NewMapping build a new Mapping from a set of KEY=VALUE strings +func NewMapping(values []string) Mapping { + mapping := Mapping{} + for _, value := range values { + parts := strings.SplitN(value, "=", 2) + key := parts[0] + switch { + case len(parts) == 1: + mapping[key] = "" + default: + mapping[key] = parts[1] + } + } + return mapping +} + +// convert values into a set of KEY=VALUE strings +func (m Mapping) Values() []string { + values := make([]string, 0, len(m)) + for k, v := range m { + values = append(values, fmt.Sprintf("%s=%s", k, v)) + } + sort.Strings(values) + return values +} + +// ToMappingWithEquals converts Mapping into a MappingWithEquals with pointer references +func (m Mapping) ToMappingWithEquals() MappingWithEquals { + mapping := MappingWithEquals{} + for k, v := range m { + mapping[k] = &v + } + return mapping +} + +func (m Mapping) Resolve(s string) (string, bool) { + v, ok := m[s] + return v, ok +} + +func (m Mapping) Clone() Mapping { + clone := Mapping{} + for k, v := range m { + clone[k] = v + } + return clone +} + +// Merge adds all values from second mapping which are not already defined +func (m Mapping) Merge(o Mapping) Mapping { + for k, v := range o { + if _, set := m[k]; !set { + m[k] = v + } + } + return m +} + +func (m *Mapping) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + mapping := make(Mapping, len(v)) + for k, e := range v { + if e == nil { + e = "" + } + mapping[k] = fmt.Sprint(e) + } + *m = mapping + case []interface{}: + *m = decodeMapping(v, "=") + default: + return fmt.Errorf("unexpected value type %T for mapping", value) + } + return nil +} + +// Generate a mapping by splitting strings at any of seps, which will be tried +// in-order for each input string. (For example, to allow the preferred 'host=ip' +// in 'extra_hosts', as well as 'host:ip' for backwards compatibility.) 
+func decodeMapping(v []interface{}, seps ...string) map[string]string { + mapping := make(Mapping, len(v)) + for _, s := range v { + for i, sep := range seps { + k, e, ok := strings.Cut(fmt.Sprint(s), sep) + if ok { + // Mapping found with this separator, stop here. + mapping[k] = e + break + } else if i == len(seps)-1 { + // No more separators to try, map to empty string. + mapping[k] = "" + } + } + } + return mapping +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/models.go b/vendor/github.com/compose-spec/compose-go/v2/types/models.go new file mode 100644 index 00000000..4f144c0a --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/models.go @@ -0,0 +1,31 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +type ModelConfig struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Model string `yaml:"model,omitempty" json:"model,omitempty"` + ContextSize int `yaml:"context_size,omitempty" json:"context_size,omitempty"` + RuntimeFlags []string `yaml:"runtime_flags,omitempty" json:"runtime_flags,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +type ServiceModelConfig struct { + EndpointVariable string `yaml:"endpoint_var,omitempty" json:"endpoint_var,omitempty"` + ModelVariable string `yaml:"model_var,omitempty" json:"model_var,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/options.go b/vendor/github.com/compose-spec/compose-go/v2/types/options.go new file mode 100644 index 00000000..9aadb89c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/options.go @@ -0,0 +1,66 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import "fmt" + +// Options is a mapping type for options we pass as-is to container runtime +type Options map[string]string + +func (d *Options) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + m := make(map[string]string) + for key, e := range v { + if e == nil { + m[key] = "" + } else { + m[key] = fmt.Sprint(e) + } + } + *d = m + case map[string]string: + *d = v + default: + return fmt.Errorf("invalid type %T for options", value) + } + return nil +} + +// MultiOptions allow option to be repeated +type MultiOptions map[string][]string + +func (d *MultiOptions) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case map[string]interface{}: + m := make(map[string][]string) + for key, e := range v { + switch e := e.(type) { + case []interface{}: + for _, v := range e { + m[key] = append(m[key], fmt.Sprint(v)) + } + default: + m[key] = append(m[key], fmt.Sprint(e)) + } + } + *d = m + default: + return fmt.Errorf("invalid type %T for options", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/project.go b/vendor/github.com/compose-spec/compose-go/v2/types/project.go new file mode 100644 index 00000000..58330e8d --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/project.go @@ -0,0 +1,844 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package types + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "maps" + "os" + "path/filepath" + "slices" + "sort" + + "github.com/compose-spec/compose-go/v2/dotenv" + "github.com/compose-spec/compose-go/v2/errdefs" + "github.com/compose-spec/compose-go/v2/utils" + "github.com/distribution/reference" + godigest "github.com/opencontainers/go-digest" + "go.yaml.in/yaml/v4" + "golang.org/x/sync/errgroup" +) + +// Project is the result of loading a set of compose files +// Since v2, Project are managed as immutable objects. +// Each public functions which mutate Project state now return a copy of the original Project with the expected changes. +type Project struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + WorkingDir string `yaml:"-" json:"-"` + Services Services `yaml:"services" json:"services"` + Networks Networks `yaml:"networks,omitempty" json:"networks,omitempty"` + Volumes Volumes `yaml:"volumes,omitempty" json:"volumes,omitempty"` + Secrets Secrets `yaml:"secrets,omitempty" json:"secrets,omitempty"` + Configs Configs `yaml:"configs,omitempty" json:"configs,omitempty"` + Models Models `yaml:"models,omitempty" json:"models,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` // https://github.com/golang/go/issues/6213 + + ComposeFiles []string `yaml:"-" json:"-"` + Environment Mapping `yaml:"-" json:"-"` + + // DisabledServices track services which have been disable as profile is not active + DisabledServices Services `yaml:"-" json:"-"` + Profiles []string `yaml:"-" json:"-"` +} + +// ServiceNames return names for all services in this Compose config +func (p *Project) ServiceNames() []string { + var names []string + for k := range p.Services { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// DisabledServiceNames return names for all disabled services in this Compose config +func (p *Project) DisabledServiceNames() []string { + var names []string + for k := range 
p.DisabledServices { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// VolumeNames return names for all volumes in this Compose config +func (p *Project) VolumeNames() []string { + var names []string + for k := range p.Volumes { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// NetworkNames return names for all volumes in this Compose config +func (p *Project) NetworkNames() []string { + var names []string + for k := range p.Networks { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// SecretNames return names for all secrets in this Compose config +func (p *Project) SecretNames() []string { + var names []string + for k := range p.Secrets { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// ConfigNames return names for all configs in this Compose config +func (p *Project) ConfigNames() []string { + var names []string + for k := range p.Configs { + names = append(names, k) + } + sort.Strings(names) + return names +} + +// ModelNames return names for all models in this Compose config +func (p *Project) ModelNames() []string { + var names []string + for k := range p.Models { + names = append(names, k) + } + sort.Strings(names) + return names +} + +func (p *Project) ServicesWithBuild() []string { + servicesBuild := p.Services.Filter(func(s ServiceConfig) bool { + return s.Build != nil && s.Build.Context != "" + }) + return slices.Collect(maps.Keys(servicesBuild)) +} + +func (p *Project) ServicesWithExtends() []string { + servicesExtends := p.Services.Filter(func(s ServiceConfig) bool { + return s.Extends != nil && *s.Extends != (ExtendsConfig{}) + }) + return slices.Collect(maps.Keys(servicesExtends)) +} + +func (p *Project) ServicesWithDependsOn() []string { + servicesDependsOn := p.Services.Filter(func(s ServiceConfig) bool { + return len(s.DependsOn) > 0 + }) + return slices.Collect(maps.Keys(servicesDependsOn)) +} + +func (p *Project) ServicesWithModels() 
[]string { + servicesModels := p.Services.Filter(func(s ServiceConfig) bool { return len(s.Models) > 0 }) + return slices.Collect(maps.Keys(servicesModels)) +} + +func (p *Project) ServicesWithCapabilities() ([]string, []string, []string) { + capabilities := []string{} + gpu := []string{} + tpu := []string{} + for _, service := range p.Services { + deploy := service.Deploy + if deploy == nil { + continue + } + reservation := deploy.Resources.Reservations + if reservation == nil { + continue + } + devices := reservation.Devices + for _, d := range devices { + if len(d.Capabilities) > 0 { + capabilities = append(capabilities, service.Name) + } + for _, c := range d.Capabilities { + switch c { + case "gpu": + gpu = append(gpu, service.Name) + case "tpu": + tpu = append(tpu, service.Name) + } + } + } + } + + return utils.RemoveDuplicates(capabilities), utils.RemoveDuplicates(gpu), utils.RemoveDuplicates(tpu) +} + +// GetServices retrieve services by names, or return all services if no name specified +func (p *Project) GetServices(names ...string) (Services, error) { + if len(names) == 0 { + return p.Services, nil + } + services := Services{} + for _, name := range names { + service, err := p.GetService(name) + if err != nil { + return nil, err + } + services[name] = service + } + return services, nil +} + +func (p *Project) getServicesByNames(names ...string) (Services, []string) { + if len(names) == 0 { + return p.Services, nil + } + + services := Services{} + var servicesNotFound []string + for _, name := range names { + matched := false + + for serviceName, service := range p.Services { + match, _ := filepath.Match(name, serviceName) + if match { + services[serviceName] = service + matched = true + } + } + + if !matched { + servicesNotFound = append(servicesNotFound, name) + } + } + + return services, servicesNotFound +} + +// GetDisabledService retrieve disabled service by name +func (p Project) GetDisabledService(name string) (ServiceConfig, error) { + service, ok 
:= p.DisabledServices[name] + if !ok { + return ServiceConfig{}, fmt.Errorf("no such service: %s", name) + } + return service, nil +} + +// GetService retrieve a specific service by name +func (p *Project) GetService(name string) (ServiceConfig, error) { + service, ok := p.Services[name] + if !ok { + _, ok := p.DisabledServices[name] + if ok { + return ServiceConfig{}, fmt.Errorf("no such service: %s: %w", name, errdefs.ErrDisabled) + } + return ServiceConfig{}, fmt.Errorf("no such service: %s: %w", name, errdefs.ErrNotFound) + } + return service, nil +} + +func (p *Project) AllServices() Services { + all := Services{} + for name, service := range p.Services { + all[name] = service + } + for name, service := range p.DisabledServices { + all[name] = service + } + return all +} + +type ServiceFunc func(name string, service *ServiceConfig) error + +// ForEachService runs ServiceFunc on each service and dependencies according to DependencyPolicy +func (p *Project) ForEachService(names []string, fn ServiceFunc, options ...DependencyOption) error { + if len(options) == 0 { + // backward compatibility + options = []DependencyOption{IncludeDependencies} + } + return p.withServices(names, fn, map[string]bool{}, options, map[string]ServiceDependency{}) +} + +type withServicesOptions struct { + dependencyPolicy int +} + +const ( + includeDependencies = iota + includeDependents + ignoreDependencies +) + +func (p *Project) withServices(names []string, fn ServiceFunc, seen map[string]bool, options []DependencyOption, dependencies map[string]ServiceDependency) error { + services, servicesNotFound := p.getServicesByNames(names...) 
+ if len(servicesNotFound) > 0 { + for _, serviceNotFound := range servicesNotFound { + if dependency, ok := dependencies[serviceNotFound]; !ok || dependency.Required { + return fmt.Errorf("no such service: %s", serviceNotFound) + } + } + } + opts := withServicesOptions{ + dependencyPolicy: includeDependencies, + } + for _, option := range options { + option(&opts) + } + + for name, service := range services { + if seen[name] { + continue + } + seen[name] = true + var dependencies map[string]ServiceDependency + switch opts.dependencyPolicy { + case includeDependents: + dependencies = utils.MapsAppend(dependencies, p.dependentsForService(service)) + case includeDependencies: + dependencies = utils.MapsAppend(dependencies, service.DependsOn) + case ignoreDependencies: + // Noop + } + if len(dependencies) > 0 { + err := p.withServices(utils.MapKeys(dependencies), fn, seen, options, dependencies) + if err != nil { + return err + } + } + if err := fn(name, service.deepCopy()); err != nil { + return err + } + } + return nil +} + +func (p *Project) GetDependentsForService(s ServiceConfig, filter ...func(ServiceDependency) bool) []string { + return utils.MapKeys(p.dependentsForService(s, filter...)) +} + +func (p *Project) dependentsForService(s ServiceConfig, filter ...func(ServiceDependency) bool) map[string]ServiceDependency { + dependent := make(map[string]ServiceDependency) + for _, service := range p.Services { + for name, dependency := range service.DependsOn { + if name == s.Name { + depends := true + for _, f := range filter { + if !f(dependency) { + depends = false + break + } + } + if depends { + dependent[service.Name] = dependency + } + } + } + } + return dependent +} + +// RelativePath resolve a relative path based project's working directory +func (p *Project) RelativePath(path string) string { + if path[0] == '~' { + home, _ := os.UserHomeDir() + path = filepath.Join(home, path[1:]) + } + if filepath.IsAbs(path) { + return path + } + return 
filepath.Join(p.WorkingDir, path) +} + +// HasProfile return true if service has no profile declared or has at least one profile matching +func (s ServiceConfig) HasProfile(profiles []string) bool { + if len(s.Profiles) == 0 { + return true + } + for _, p := range profiles { + if p == "*" { + return true + } + for _, sp := range s.Profiles { + if sp == p { + return true + } + } + } + return false +} + +// WithProfiles disables services which don't match selected profiles +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithProfiles(profiles []string) (*Project, error) { + newProject := p.deepCopy() + enabled := Services{} + disabled := Services{} + for name, service := range newProject.AllServices() { + if service.HasProfile(profiles) { + enabled[name] = service + } else { + disabled[name] = service + } + } + newProject.Services = enabled + newProject.DisabledServices = disabled + newProject.Profiles = profiles + return newProject, nil +} + +// WithServicesEnabled ensures services are enabled and activate profiles accordingly +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithServicesEnabled(names ...string) (*Project, error) { + newProject := p.deepCopy() + if len(names) == 0 { + return newProject, nil + } + + profiles := append([]string{}, p.Profiles...) + for _, name := range names { + if _, ok := newProject.Services[name]; ok { + // already enabled + continue + } + service := p.DisabledServices[name] + profiles = append(profiles, service.Profiles...) 
+ } + return newProject.WithProfiles(profiles) +} + +// WithoutUnnecessaryResources drops networks/volumes/secrets/configs that are not referenced by active services +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithoutUnnecessaryResources() *Project { + newProject := p.deepCopy() + requiredNetworks := map[string]struct{}{} + requiredVolumes := map[string]struct{}{} + requiredSecrets := map[string]struct{}{} + requiredConfigs := map[string]struct{}{} + requiredModels := map[string]struct{}{} + for _, s := range newProject.Services { + for k := range s.Networks { + requiredNetworks[k] = struct{}{} + } + for _, v := range s.Volumes { + if v.Type != VolumeTypeVolume || v.Source == "" { + continue + } + requiredVolumes[v.Source] = struct{}{} + } + for _, v := range s.Secrets { + requiredSecrets[v.Source] = struct{}{} + } + if s.Build != nil { + for _, v := range s.Build.Secrets { + requiredSecrets[v.Source] = struct{}{} + } + } + for _, v := range s.Configs { + requiredConfigs[v.Source] = struct{}{} + } + for m := range s.Models { + requiredModels[m] = struct{}{} + } + } + + networks := Networks{} + for k := range requiredNetworks { + if value, ok := p.Networks[k]; ok { + networks[k] = value + } + } + newProject.Networks = networks + + volumes := Volumes{} + for k := range requiredVolumes { + if value, ok := p.Volumes[k]; ok { + volumes[k] = value + } + } + newProject.Volumes = volumes + + secrets := Secrets{} + for k := range requiredSecrets { + if value, ok := p.Secrets[k]; ok { + secrets[k] = value + } + } + newProject.Secrets = secrets + + configs := Configs{} + for k := range requiredConfigs { + if value, ok := p.Configs[k]; ok { + configs[k] = value + } + } + newProject.Configs = configs + + models := Models{} + for k := range requiredModels { + if value, ok := p.Models[k]; ok { + models[k] = value + } + } + newProject.Models = models + return newProject +} + +type DependencyOption func(options 
*withServicesOptions) + +func IncludeDependencies(options *withServicesOptions) { + options.dependencyPolicy = includeDependencies +} + +func IncludeDependents(options *withServicesOptions) { + options.dependencyPolicy = includeDependents +} + +func IgnoreDependencies(options *withServicesOptions) { + options.dependencyPolicy = ignoreDependencies +} + +// WithSelectedServices restricts the project model to selected services and dependencies +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithSelectedServices(names []string, options ...DependencyOption) (*Project, error) { + newProject := p.deepCopy() + if len(names) == 0 { + // All services + return newProject, nil + } + + set := utils.NewSet[string]() + err := p.ForEachService(names, func(name string, _ *ServiceConfig) error { + set.Add(name) + return nil + }, options...) + if err != nil { + return nil, err + } + + // Disable all services which are not explicit target or dependencies + enabled := Services{} + for name, s := range newProject.Services { + if _, ok := set[name]; ok { + // remove all dependencies but those implied by explicitly selected services + dependencies := s.DependsOn + for d := range dependencies { + if _, ok := set[d]; !ok { + delete(dependencies, d) + } + } + s.DependsOn = dependencies + enabled[name] = s + } else { + newProject = newProject.WithServicesDisabled(name) + } + } + newProject.Services = enabled + return newProject, nil +} + +// WithServicesDisabled removes from the project model the given services and their references in all dependencies +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithServicesDisabled(names ...string) *Project { + newProject := p.deepCopy() + if len(names) == 0 { + return newProject + } + if newProject.DisabledServices == nil { + newProject.DisabledServices = Services{} + } + for _, name := range names { + // We should remove 
all dependencies which reference the disabled service + for i, s := range newProject.Services { + if _, ok := s.DependsOn[name]; ok { + delete(s.DependsOn, name) + newProject.Services[i] = s + } + } + if service, ok := newProject.Services[name]; ok { + newProject.DisabledServices[name] = service + delete(newProject.Services, name) + } + } + return newProject +} + +// WithImagesResolved updates services images to include digest computed by a resolver function +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p *Project) WithImagesResolved(resolver func(named reference.Named) (godigest.Digest, error)) (*Project, error) { + return p.WithServicesTransform(func(_ string, service ServiceConfig) (ServiceConfig, error) { + if service.Image == "" { + return service, nil + } + named, err := reference.ParseDockerRef(service.Image) + if err != nil { + return service, err + } + + if _, ok := named.(reference.Canonical); !ok { + // image is named but not digested reference + digest, err := resolver(named) + if err != nil { + return service, err + } + named, err = reference.WithDigest(named, digest) + if err != nil { + return service, err + } + } + service.Image = named.String() + return service, nil + }) +} + +type marshallOptions struct { + secretsContent bool +} + +func WithSecretContent(o *marshallOptions) { + o.secretsContent = true +} + +func (opt *marshallOptions) apply(p *Project) *Project { + if opt.secretsContent { + p = p.deepCopy() + for name, config := range p.Secrets { + config.marshallContent = true + p.Secrets[name] = config + } + } + return p +} + +func applyMarshallOptions(p *Project, options ...func(*marshallOptions)) *Project { + opts := &marshallOptions{} + for _, option := range options { + option(opts) + } + p = opts.apply(p) + return p +} + +// MarshalYAML marshal Project into a yaml tree +func (p *Project) MarshalYAML(options ...func(*marshallOptions)) ([]byte, error) { + buf := bytes.NewBuffer([]byte{}) 
+ encoder := yaml.NewEncoder(buf) + encoder.SetIndent(2) + // encoder.CompactSeqIndent() FIXME https://github.com/go-yaml/yaml/pull/753 + src := applyMarshallOptions(p, options...) + err := encoder.Encode(src) + if err != nil { + return nil, err + } + return buf.Bytes(), nil +} + +// MarshalJSON marshal Project into a json document +func (p *Project) MarshalJSON(options ...func(*marshallOptions)) ([]byte, error) { + src := applyMarshallOptions(p, options...) + m := map[string]interface{}{ + "name": src.Name, + "services": src.Services, + } + + if len(src.Networks) > 0 { + m["networks"] = src.Networks + } + if len(src.Volumes) > 0 { + m["volumes"] = src.Volumes + } + if len(src.Secrets) > 0 { + m["secrets"] = src.Secrets + } + if len(src.Configs) > 0 { + m["configs"] = src.Configs + } + for k, v := range src.Extensions { + m[k] = v + } + return json.MarshalIndent(m, "", " ") +} + +// WithServicesEnvironmentResolved parses env_files set for services to resolve the actual environment map for services +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p Project) WithServicesEnvironmentResolved(discardEnvFiles bool) (*Project, error) { + newProject := p.deepCopy() + for i, service := range newProject.Services { + service.Environment = service.Environment.Resolve(newProject.Environment.Resolve) + + environment := service.Environment.ToMapping() + for _, envFile := range service.EnvFiles { + err := loadEnvFile(envFile, environment, func(k string) (string, bool) { + // project.env has precedence doing interpolation + if resolve, ok := p.Environment.Resolve(k); ok { + return resolve, true + } + // then service.environment + if s, ok := service.Environment[k]; ok && s != nil { + return *s, true + } + return "", false + }) + if err != nil { + return nil, err + } + } + + service.Environment = environment.ToMappingWithEquals().OverrideBy(service.Environment) + + if discardEnvFiles { + service.EnvFiles = nil + } + 
newProject.Services[i] = service + } + return newProject, nil +} + +// WithServicesLabelsResolved parses label_files set for services to resolve the actual label map for services +// It returns a new Project instance with the changes and keep the original Project unchanged +func (p Project) WithServicesLabelsResolved(discardLabelFiles bool) (*Project, error) { + newProject := p.deepCopy() + for i, service := range newProject.Services { + labels := MappingWithEquals{} + // resolve variables based on other files we already parsed + var resolve dotenv.LookupFn = func(s string) (string, bool) { + v, ok := labels[s] + if ok && v != nil { + return *v, ok + } + return "", false + } + + for _, labelFile := range service.LabelFiles { + vars, err := loadLabelFile(labelFile, resolve) + if err != nil { + return nil, err + } + labels.OverrideBy(vars.ToMappingWithEquals()) + } + + labels = labels.OverrideBy(service.Labels.ToMappingWithEquals()) + if len(labels) == 0 { + labels = nil + } else { + service.Labels = NewLabelsFromMappingWithEquals(labels) + } + + if discardLabelFiles { + service.LabelFiles = nil + } + newProject.Services[i] = service + } + return newProject, nil +} + +func loadEnvFile(envFile EnvFile, environment Mapping, resolve dotenv.LookupFn) error { + if _, err := os.Stat(envFile.Path); os.IsNotExist(err) { + if envFile.Required { + return fmt.Errorf("env file %s not found: %w", envFile.Path, err) + } + return nil + } + + err := loadMappingFile(envFile.Path, envFile.Format, environment, resolve) + return err +} + +func loadLabelFile(labelFile string, resolve dotenv.LookupFn) (Mapping, error) { + if _, err := os.Stat(labelFile); os.IsNotExist(err) { + return nil, fmt.Errorf("label file %s not found: %w", labelFile, err) + } + + labels := Mapping{} + err := loadMappingFile(labelFile, "", labels, resolve) + return labels, err +} + +func loadMappingFile(path string, format string, vars Mapping, resolve dotenv.LookupFn) error { + file, err := os.Open(path) + if err 
!= nil { + return err + } + defer file.Close() + + return dotenv.ParseWithFormat(file, path, vars, resolve, format) +} + +func (p *Project) deepCopy() *Project { + if p == nil { + return nil + } + n := &Project{} + deriveDeepCopyProject(n, p) + return n +} + +// WithServicesTransform applies a transformation to project services and return a new project with transformation results +func (p *Project) WithServicesTransform(fn func(name string, s ServiceConfig) (ServiceConfig, error)) (*Project, error) { + type result struct { + name string + service ServiceConfig + } + expect := len(p.Services) + resultCh := make(chan result, expect) + newProject := p.deepCopy() + + eg, ctx := errgroup.WithContext(context.Background()) + eg.Go(func() error { + s := Services{} + for expect > 0 { + select { + case <-ctx.Done(): + // interrupted as some goroutine returned an error + return nil + case r := <-resultCh: + s[r.name] = r.service + expect-- + } + } + newProject.Services = s + return nil + }) + for n, s := range newProject.Services { + name := n + service := s + eg.Go(func() error { + updated, err := fn(name, service) + if err != nil { + return err + } + resultCh <- result{ + name: name, + service: updated, + } + return nil + }) + } + return newProject, eg.Wait() +} + +// CheckContainerNameUnicity validate project doesn't have services declaring the same container_name +func (p *Project) CheckContainerNameUnicity() error { + names := utils.Set[string]{} + for name, s := range p.Services { + if s.ContainerName != "" { + if existing, ok := names[s.ContainerName]; ok { + return fmt.Errorf(`services.%s: container name %q is already in use by service %s"`, name, s.ContainerName, existing) + } + names.Add(s.ContainerName) + } + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/services.go b/vendor/github.com/compose-spec/compose-go/v2/types/services.go new file mode 100644 index 00000000..0efc4b9f --- /dev/null +++ 
b/vendor/github.com/compose-spec/compose-go/v2/types/services.go @@ -0,0 +1,45 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +// Services is a map of ServiceConfig +type Services map[string]ServiceConfig + +// GetProfiles retrieve the profiles implicitly enabled by explicitly targeting selected services +func (s Services) GetProfiles() []string { + set := map[string]struct{}{} + for _, service := range s { + for _, p := range service.Profiles { + set[p] = struct{}{} + } + } + var profiles []string + for k := range set { + profiles = append(profiles, k) + } + return profiles +} + +func (s Services) Filter(predicate func(ServiceConfig) bool) Services { + services := Services{} + for name, service := range s { + if predicate(service) { + services[name] = service + } + } + return services +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go b/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go new file mode 100644 index 00000000..6d0edb69 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/ssh.go @@ -0,0 +1,73 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. 
+ You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "fmt" +) + +type SSHKey struct { + ID string `yaml:"id,omitempty" json:"id,omitempty"` + Path string `path:"path,omitempty" json:"path,omitempty"` +} + +// SSHConfig is a mapping type for SSH build config +type SSHConfig []SSHKey + +func (s SSHConfig) Get(id string) (string, error) { + for _, sshKey := range s { + if sshKey.ID == id { + return sshKey.Path, nil + } + } + return "", fmt.Errorf("ID %s not found in SSH keys", id) +} + +// MarshalYAML makes SSHKey implement yaml.Marshaller +func (s SSHKey) MarshalYAML() (interface{}, error) { + if s.Path == "" { + return s.ID, nil + } + return fmt.Sprintf("%s: %s", s.ID, s.Path), nil +} + +// MarshalJSON makes SSHKey implement json.Marshaller +func (s SSHKey) MarshalJSON() ([]byte, error) { + if s.Path == "" { + return []byte(fmt.Sprintf(`%q`, s.ID)), nil + } + return []byte(fmt.Sprintf(`%q: %s`, s.ID, s.Path)), nil +} + +func (s *SSHConfig) DecodeMapstructure(value interface{}) error { + v, ok := value.(map[string]any) + if !ok { + return fmt.Errorf("invalid ssh config type %T", value) + } + result := make(SSHConfig, len(v)) + i := 0 + for id, path := range v { + key := SSHKey{ID: id} + if path != nil { + key.Path = fmt.Sprint(path) + } + result[i] = key + i++ + } + *s = result + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go b/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go new file mode 100644 index 00000000..a6720df0 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/stringOrList.go @@ -0,0 +1,61 
@@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import "fmt" + +// StringList is a type for fields that can be a string or list of strings +type StringList []string + +func (l *StringList) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + *l = []string{v} + case []interface{}: + list := make([]string, len(v)) + for i, e := range v { + val, ok := e.(string) + if !ok { + return fmt.Errorf("invalid type %T for string list", value) + } + list[i] = val + } + *l = list + default: + return fmt.Errorf("invalid type %T for string list", value) + } + return nil +} + +// StringOrNumberList is a type for fields that can be a list of strings or numbers +type StringOrNumberList []string + +func (l *StringOrNumberList) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case string: + *l = []string{v} + case []interface{}: + list := make([]string, len(v)) + for i, e := range v { + list[i] = fmt.Sprint(e) + } + *l = list + default: + return fmt.Errorf("invalid type %T for string list", value) + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/types/types.go b/vendor/github.com/compose-spec/compose-go/v2/types/types.go new file mode 100644 index 00000000..fd4f3513 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/types/types.go @@ -0,0 +1,878 @@ +/* + Copyright 2020 The Compose Specification Authors. 
+ + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package types + +import ( + "encoding/json" + "fmt" + "sort" + "strconv" + "strings" + "time" + + "github.com/docker/go-connections/nat" + "github.com/xhit/go-str2duration/v2" +) + +// ServiceConfig is the configuration of one service +type ServiceConfig struct { + Name string `yaml:"name,omitempty" json:"-"` + Profiles []string `yaml:"profiles,omitempty" json:"profiles,omitempty"` + + Annotations Mapping `yaml:"annotations,omitempty" json:"annotations,omitempty"` + Attach *bool `yaml:"attach,omitempty" json:"attach,omitempty"` + Build *BuildConfig `yaml:"build,omitempty" json:"build,omitempty"` + Develop *DevelopConfig `yaml:"develop,omitempty" json:"develop,omitempty"` + BlkioConfig *BlkioConfig `yaml:"blkio_config,omitempty" json:"blkio_config,omitempty"` + CapAdd []string `yaml:"cap_add,omitempty" json:"cap_add,omitempty"` + CapDrop []string `yaml:"cap_drop,omitempty" json:"cap_drop,omitempty"` + CgroupParent string `yaml:"cgroup_parent,omitempty" json:"cgroup_parent,omitempty"` + Cgroup string `yaml:"cgroup,omitempty" json:"cgroup,omitempty"` + CPUCount int64 `yaml:"cpu_count,omitempty" json:"cpu_count,omitempty"` + CPUPercent float32 `yaml:"cpu_percent,omitempty" json:"cpu_percent,omitempty"` + CPUPeriod int64 `yaml:"cpu_period,omitempty" json:"cpu_period,omitempty"` + CPUQuota int64 `yaml:"cpu_quota,omitempty" json:"cpu_quota,omitempty"` + CPURTPeriod int64 `yaml:"cpu_rt_period,omitempty" json:"cpu_rt_period,omitempty"` 
+ CPURTRuntime int64 `yaml:"cpu_rt_runtime,omitempty" json:"cpu_rt_runtime,omitempty"` + CPUS float32 `yaml:"cpus,omitempty" json:"cpus,omitempty"` + CPUSet string `yaml:"cpuset,omitempty" json:"cpuset,omitempty"` + CPUShares int64 `yaml:"cpu_shares,omitempty" json:"cpu_shares,omitempty"` + + // Command for the service containers. + // If set, overrides COMMAND from the image. + // + // Set to `[]` or an empty string to clear the command from the image. + Command ShellCommand `yaml:"command,omitempty" json:"command"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details. + + Configs []ServiceConfigObjConfig `yaml:"configs,omitempty" json:"configs,omitempty"` + ContainerName string `yaml:"container_name,omitempty" json:"container_name,omitempty"` + CredentialSpec *CredentialSpecConfig `yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"` + DependsOn DependsOnConfig `yaml:"depends_on,omitempty" json:"depends_on,omitempty"` + Deploy *DeployConfig `yaml:"deploy,omitempty" json:"deploy,omitempty"` + DeviceCgroupRules []string `yaml:"device_cgroup_rules,omitempty" json:"device_cgroup_rules,omitempty"` + Devices []DeviceMapping `yaml:"devices,omitempty" json:"devices,omitempty"` + DNS StringList `yaml:"dns,omitempty" json:"dns,omitempty"` + DNSOpts []string `yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"` + DNSSearch StringList `yaml:"dns_search,omitempty" json:"dns_search,omitempty"` + Dockerfile string `yaml:"dockerfile,omitempty" json:"dockerfile,omitempty"` + DomainName string `yaml:"domainname,omitempty" json:"domainname,omitempty"` + + // Entrypoint for the service containers. + // If set, overrides ENTRYPOINT from the image. + // + // Set to `[]` or an empty string to clear the entrypoint from the image. + Entrypoint ShellCommand `yaml:"entrypoint,omitempty" json:"entrypoint"` // NOTE: we can NOT omitempty for JSON! see ShellCommand type for details. 
+ Provider *ServiceProviderConfig `yaml:"provider,omitempty" json:"provider,omitempty"` + Environment MappingWithEquals `yaml:"environment,omitempty" json:"environment,omitempty"` + EnvFiles []EnvFile `yaml:"env_file,omitempty" json:"env_file,omitempty"` + Expose StringOrNumberList `yaml:"expose,omitempty" json:"expose,omitempty"` + Extends *ExtendsConfig `yaml:"extends,omitempty" json:"extends,omitempty"` + ExternalLinks []string `yaml:"external_links,omitempty" json:"external_links,omitempty"` + ExtraHosts HostsList `yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"` + GroupAdd []string `yaml:"group_add,omitempty" json:"group_add,omitempty"` + Gpus []DeviceRequest `yaml:"gpus,omitempty" json:"gpus,omitempty"` + Hostname string `yaml:"hostname,omitempty" json:"hostname,omitempty"` + HealthCheck *HealthCheckConfig `yaml:"healthcheck,omitempty" json:"healthcheck,omitempty"` + Image string `yaml:"image,omitempty" json:"image,omitempty"` + Init *bool `yaml:"init,omitempty" json:"init,omitempty"` + Ipc string `yaml:"ipc,omitempty" json:"ipc,omitempty"` + Isolation string `yaml:"isolation,omitempty" json:"isolation,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + LabelFiles []string `yaml:"label_file,omitempty" json:"label_file,omitempty"` + CustomLabels Labels `yaml:"-" json:"-"` + Links []string `yaml:"links,omitempty" json:"links,omitempty"` + Logging *LoggingConfig `yaml:"logging,omitempty" json:"logging,omitempty"` + LogDriver string `yaml:"log_driver,omitempty" json:"log_driver,omitempty"` + LogOpt map[string]string `yaml:"log_opt,omitempty" json:"log_opt,omitempty"` + MemLimit UnitBytes `yaml:"mem_limit,omitempty" json:"mem_limit,omitempty"` + MemReservation UnitBytes `yaml:"mem_reservation,omitempty" json:"mem_reservation,omitempty"` + MemSwapLimit UnitBytes `yaml:"memswap_limit,omitempty" json:"memswap_limit,omitempty"` + MemSwappiness UnitBytes `yaml:"mem_swappiness,omitempty" json:"mem_swappiness,omitempty"` + MacAddress 
string `yaml:"mac_address,omitempty" json:"mac_address,omitempty"` + Models map[string]*ServiceModelConfig `yaml:"models,omitempty" json:"models,omitempty"` + Net string `yaml:"net,omitempty" json:"net,omitempty"` + NetworkMode string `yaml:"network_mode,omitempty" json:"network_mode,omitempty"` + Networks map[string]*ServiceNetworkConfig `yaml:"networks,omitempty" json:"networks,omitempty"` + OomKillDisable bool `yaml:"oom_kill_disable,omitempty" json:"oom_kill_disable,omitempty"` + OomScoreAdj int64 `yaml:"oom_score_adj,omitempty" json:"oom_score_adj,omitempty"` + Pid string `yaml:"pid,omitempty" json:"pid,omitempty"` + PidsLimit int64 `yaml:"pids_limit,omitempty" json:"pids_limit,omitempty"` + Platform string `yaml:"platform,omitempty" json:"platform,omitempty"` + Ports []ServicePortConfig `yaml:"ports,omitempty" json:"ports,omitempty"` + Privileged bool `yaml:"privileged,omitempty" json:"privileged,omitempty"` + PullPolicy string `yaml:"pull_policy,omitempty" json:"pull_policy,omitempty"` + ReadOnly bool `yaml:"read_only,omitempty" json:"read_only,omitempty"` + Restart string `yaml:"restart,omitempty" json:"restart,omitempty"` + Runtime string `yaml:"runtime,omitempty" json:"runtime,omitempty"` + Scale *int `yaml:"scale,omitempty" json:"scale,omitempty"` + Secrets []ServiceSecretConfig `yaml:"secrets,omitempty" json:"secrets,omitempty"` + SecurityOpt []string `yaml:"security_opt,omitempty" json:"security_opt,omitempty"` + ShmSize UnitBytes `yaml:"shm_size,omitempty" json:"shm_size,omitempty"` + StdinOpen bool `yaml:"stdin_open,omitempty" json:"stdin_open,omitempty"` + StopGracePeriod *Duration `yaml:"stop_grace_period,omitempty" json:"stop_grace_period,omitempty"` + StopSignal string `yaml:"stop_signal,omitempty" json:"stop_signal,omitempty"` + StorageOpt map[string]string `yaml:"storage_opt,omitempty" json:"storage_opt,omitempty"` + Sysctls Mapping `yaml:"sysctls,omitempty" json:"sysctls,omitempty"` + Tmpfs StringList `yaml:"tmpfs,omitempty" 
json:"tmpfs,omitempty"` + Tty bool `yaml:"tty,omitempty" json:"tty,omitempty"` + Ulimits map[string]*UlimitsConfig `yaml:"ulimits,omitempty" json:"ulimits,omitempty"` + UseAPISocket bool `yaml:"use_api_socket,omitempty" json:"use_api_socket,omitempty"` + User string `yaml:"user,omitempty" json:"user,omitempty"` + UserNSMode string `yaml:"userns_mode,omitempty" json:"userns_mode,omitempty"` + Uts string `yaml:"uts,omitempty" json:"uts,omitempty"` + VolumeDriver string `yaml:"volume_driver,omitempty" json:"volume_driver,omitempty"` + Volumes []ServiceVolumeConfig `yaml:"volumes,omitempty" json:"volumes,omitempty"` + VolumesFrom []string `yaml:"volumes_from,omitempty" json:"volumes_from,omitempty"` + WorkingDir string `yaml:"working_dir,omitempty" json:"working_dir,omitempty"` + PostStart []ServiceHook `yaml:"post_start,omitempty" json:"post_start,omitempty"` + PreStop []ServiceHook `yaml:"pre_stop,omitempty" json:"pre_stop,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +type ServiceProviderConfig struct { + Type string `yaml:"type,omitempty" json:"type,omitempty"` + Options MultiOptions `yaml:"options,omitempty" json:"options,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// MarshalYAML makes ServiceConfig implement yaml.Marshaller +func (s ServiceConfig) MarshalYAML() (interface{}, error) { + type t ServiceConfig + value := t(s) + value.Name = "" // set during map to slice conversion, not part of the yaml representation + return value, nil +} + +// NetworksByPriority return the service networks IDs sorted according to Priority +func (s *ServiceConfig) NetworksByPriority() []string { + type key struct { + name string + priority int + } + var keys []key + for k, v := range s.Networks { + priority := 0 + if v != nil { + priority = v.Priority + } + keys = append(keys, key{ + name: k, + priority: priority, + }) + } + sort.Slice(keys, func(i, j int) bool { + if keys[i].priority == 
keys[j].priority { + return keys[i].name < keys[j].name + } + return keys[i].priority > keys[j].priority + }) + var sorted []string + for _, k := range keys { + sorted = append(sorted, k.name) + } + return sorted +} + +func (s *ServiceConfig) GetScale() int { + if s.Scale != nil { + return *s.Scale + } + if s.Deploy != nil && s.Deploy.Replicas != nil { + // this should not be required as compose-go enforce consistency between scale anr replicas + return *s.Deploy.Replicas + } + return 1 +} + +func (s *ServiceConfig) SetScale(scale int) { + s.Scale = &scale + if s.Deploy != nil { + s.Deploy.Replicas = &scale + } +} + +func (s *ServiceConfig) deepCopy() *ServiceConfig { + if s == nil { + return nil + } + n := &ServiceConfig{} + deriveDeepCopyService(n, s) + return n +} + +const ( + // PullPolicyAlways always pull images + PullPolicyAlways = "always" + // PullPolicyNever never pull images + PullPolicyNever = "never" + // PullPolicyIfNotPresent pull missing images + PullPolicyIfNotPresent = "if_not_present" + // PullPolicyMissing pull missing images + PullPolicyMissing = "missing" + // PullPolicyBuild force building images + PullPolicyBuild = "build" + // PullPolicyRefresh checks if image needs to be updated + PullPolicyRefresh = "refresh" +) + +const ( + // RestartPolicyAlways always restart the container if it stops + RestartPolicyAlways = "always" + // RestartPolicyOnFailure restart the container if it exits due to an error + RestartPolicyOnFailure = "on-failure" + // RestartPolicyNo do not automatically restart the container + RestartPolicyNo = "no" + // RestartPolicyUnlessStopped always restart the container unless the container is stopped (manually or otherwise) + RestartPolicyUnlessStopped = "unless-stopped" +) + +const ( + // ServicePrefix is the prefix for references pointing to a service + ServicePrefix = "service:" + // ContainerPrefix is the prefix for references pointing to a container + ContainerPrefix = "container:" + + // NetworkModeServicePrefix is the 
prefix for network_mode pointing to a service + // Deprecated prefer ServicePrefix + NetworkModeServicePrefix = ServicePrefix + // NetworkModeContainerPrefix is the prefix for network_mode pointing to a container + // Deprecated prefer ContainerPrefix + NetworkModeContainerPrefix = ContainerPrefix +) + +const ( + SecretConfigXValue = "x-#value" +) + +// GetDependencies retrieves all services this service depends on +func (s ServiceConfig) GetDependencies() []string { + var dependencies []string + for service := range s.DependsOn { + dependencies = append(dependencies, service) + } + return dependencies +} + +// GetDependents retrieves all services which depend on this service +func (s ServiceConfig) GetDependents(p *Project) []string { + var dependent []string + for _, service := range p.Services { + for name := range service.DependsOn { + if name == s.Name { + dependent = append(dependent, service.Name) + } + } + } + return dependent +} + +func (s ServiceConfig) GetPullPolicy() (string, time.Duration, error) { + switch s.PullPolicy { + case PullPolicyAlways, PullPolicyNever, PullPolicyIfNotPresent, PullPolicyMissing, PullPolicyBuild: + return s.PullPolicy, 0, nil + case "daily": + return PullPolicyRefresh, 24 * time.Hour, nil + case "weekly": + return PullPolicyRefresh, 7 * 24 * time.Hour, nil + default: + if strings.HasPrefix(s.PullPolicy, "every_") { + delay := s.PullPolicy[6:] + duration, err := str2duration.ParseDuration(delay) + if err != nil { + return "", 0, err + } + return PullPolicyRefresh, duration, nil + } + return PullPolicyMissing, 0, nil + } +} + +// BlkioConfig define blkio config +type BlkioConfig struct { + Weight uint16 `yaml:"weight,omitempty" json:"weight,omitempty"` + WeightDevice []WeightDevice `yaml:"weight_device,omitempty" json:"weight_device,omitempty"` + DeviceReadBps []ThrottleDevice `yaml:"device_read_bps,omitempty" json:"device_read_bps,omitempty"` + DeviceReadIOps []ThrottleDevice `yaml:"device_read_iops,omitempty" 
json:"device_read_iops,omitempty"` + DeviceWriteBps []ThrottleDevice `yaml:"device_write_bps,omitempty" json:"device_write_bps,omitempty"` + DeviceWriteIOps []ThrottleDevice `yaml:"device_write_iops,omitempty" json:"device_write_iops,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +type DeviceMapping struct { + Source string `yaml:"source,omitempty" json:"source,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + Permissions string `yaml:"permissions,omitempty" json:"permissions,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// WeightDevice is a structure that holds device:weight pair +type WeightDevice struct { + Path string + Weight uint16 + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ThrottleDevice is a structure that holds device:rate_per_second pair +type ThrottleDevice struct { + Path string + Rate UnitBytes + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// MappingWithColon is a mapping type that can be converted from a list of +// 'key: value' strings +type MappingWithColon map[string]string + +// LoggingConfig the logging configuration for a service +type LoggingConfig struct { + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + Options Options `yaml:"options,omitempty" json:"options,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// DeployConfig the deployment configuration for a service +type DeployConfig struct { + Mode string `yaml:"mode,omitempty" json:"mode,omitempty"` + Replicas *int `yaml:"replicas,omitempty" json:"replicas,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + UpdateConfig *UpdateConfig `yaml:"update_config,omitempty" json:"update_config,omitempty"` + RollbackConfig *UpdateConfig `yaml:"rollback_config,omitempty" json:"rollback_config,omitempty"` + Resources Resources 
`yaml:"resources,omitempty" json:"resources,omitempty"` + RestartPolicy *RestartPolicy `yaml:"restart_policy,omitempty" json:"restart_policy,omitempty"` + Placement Placement `yaml:"placement,omitempty" json:"placement,omitempty"` + EndpointMode string `yaml:"endpoint_mode,omitempty" json:"endpoint_mode,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// UpdateConfig the service update configuration +type UpdateConfig struct { + Parallelism *uint64 `yaml:"parallelism,omitempty" json:"parallelism,omitempty"` + Delay Duration `yaml:"delay,omitempty" json:"delay,omitempty"` + FailureAction string `yaml:"failure_action,omitempty" json:"failure_action,omitempty"` + Monitor Duration `yaml:"monitor,omitempty" json:"monitor,omitempty"` + MaxFailureRatio float32 `yaml:"max_failure_ratio,omitempty" json:"max_failure_ratio,omitempty"` + Order string `yaml:"order,omitempty" json:"order,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// Resources the resource limits and reservations +type Resources struct { + Limits *Resource `yaml:"limits,omitempty" json:"limits,omitempty"` + Reservations *Resource `yaml:"reservations,omitempty" json:"reservations,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// Resource is a resource to be limited or reserved +type Resource struct { + // TODO: types to convert from units and ratios + NanoCPUs NanoCPUs `yaml:"cpus,omitempty" json:"cpus,omitempty"` + MemoryBytes UnitBytes `yaml:"memory,omitempty" json:"memory,omitempty"` + Pids int64 `yaml:"pids,omitempty" json:"pids,omitempty"` + Devices []DeviceRequest `yaml:"devices,omitempty" json:"devices,omitempty"` + GenericResources []GenericResource `yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// GenericResource represents a "user defined" resource which can +// only be 
an integer (e.g: SSD=3) for a service +type GenericResource struct { + DiscreteResourceSpec *DiscreteGenericResource `yaml:"discrete_resource_spec,omitempty" json:"discrete_resource_spec,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// DiscreteGenericResource represents a "user defined" resource which is defined +// as an integer +// "Kind" is used to describe the Kind of a resource (e.g: "GPU", "FPGA", "SSD", ...) +// Value is used to count the resource (SSD=5, HDD=3, ...) +type DiscreteGenericResource struct { + Kind string `json:"kind"` + Value int64 `json:"value"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// RestartPolicy the service restart policy +type RestartPolicy struct { + Condition string `yaml:"condition,omitempty" json:"condition,omitempty"` + Delay *Duration `yaml:"delay,omitempty" json:"delay,omitempty"` + MaxAttempts *uint64 `yaml:"max_attempts,omitempty" json:"max_attempts,omitempty"` + Window *Duration `yaml:"window,omitempty" json:"window,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// Placement constraints for the service +type Placement struct { + Constraints []string `yaml:"constraints,omitempty" json:"constraints,omitempty"` + Preferences []PlacementPreferences `yaml:"preferences,omitempty" json:"preferences,omitempty"` + MaxReplicas uint64 `yaml:"max_replicas_per_node,omitempty" json:"max_replicas_per_node,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// PlacementPreferences is the preferences for a service placement +type PlacementPreferences struct { + Spread string `yaml:"spread,omitempty" json:"spread,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ServiceNetworkConfig is the network configuration for a service +type ServiceNetworkConfig struct { + Aliases []string `yaml:"aliases,omitempty" json:"aliases,omitempty"` + 
DriverOpts Options `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + GatewayPriority int `yaml:"gw_priority,omitempty" json:"gw_priority,omitempty"` + InterfaceName string `yaml:"interface_name,omitempty" json:"interface_name,omitempty"` + Ipv4Address string `yaml:"ipv4_address,omitempty" json:"ipv4_address,omitempty"` + Ipv6Address string `yaml:"ipv6_address,omitempty" json:"ipv6_address,omitempty"` + LinkLocalIPs []string `yaml:"link_local_ips,omitempty" json:"link_local_ips,omitempty"` + MacAddress string `yaml:"mac_address,omitempty" json:"mac_address,omitempty"` + Priority int `yaml:"priority,omitempty" json:"priority,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ServicePortConfig is the port configuration for a service +type ServicePortConfig struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Mode string `yaml:"mode,omitempty" json:"mode,omitempty"` + HostIP string `yaml:"host_ip,omitempty" json:"host_ip,omitempty"` + Target uint32 `yaml:"target,omitempty" json:"target,omitempty"` + Published string `yaml:"published,omitempty" json:"published,omitempty"` + Protocol string `yaml:"protocol,omitempty" json:"protocol,omitempty"` + AppProtocol string `yaml:"app_protocol,omitempty" json:"app_protocol,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ParsePortConfig parse short syntax for service port configuration +func ParsePortConfig(value string) ([]ServicePortConfig, error) { + var portConfigs []ServicePortConfig + ports, portBindings, err := nat.ParsePortSpecs([]string{value}) + if err != nil { + return nil, err + } + // We need to sort the key of the ports to make sure it is consistent + keys := []string{} + for port := range ports { + keys = append(keys, string(port)) + } + sort.Strings(keys) + + for _, key := range keys { + port := nat.Port(key) + converted := convertPortToPortConfig(port, portBindings) + portConfigs = 
append(portConfigs, converted...) + } + return portConfigs, nil +} + +func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) []ServicePortConfig { + var portConfigs []ServicePortConfig + for _, binding := range portBindings[port] { + portConfigs = append(portConfigs, ServicePortConfig{ + HostIP: binding.HostIP, + Protocol: strings.ToLower(port.Proto()), + Target: uint32(port.Int()), + Published: binding.HostPort, + Mode: "ingress", + }) + } + return portConfigs +} + +// ServiceVolumeConfig are references to a volume used by a service +type ServiceVolumeConfig struct { + Type string `yaml:"type,omitempty" json:"type,omitempty"` + Source string `yaml:"source,omitempty" json:"source,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + ReadOnly bool `yaml:"read_only,omitempty" json:"read_only,omitempty"` + Consistency string `yaml:"consistency,omitempty" json:"consistency,omitempty"` + Bind *ServiceVolumeBind `yaml:"bind,omitempty" json:"bind,omitempty"` + Volume *ServiceVolumeVolume `yaml:"volume,omitempty" json:"volume,omitempty"` + Tmpfs *ServiceVolumeTmpfs `yaml:"tmpfs,omitempty" json:"tmpfs,omitempty"` + Image *ServiceVolumeImage `yaml:"image,omitempty" json:"image,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// String render ServiceVolumeConfig as a volume string, one can parse back using loader.ParseVolume +func (s ServiceVolumeConfig) String() string { + access := "rw" + if s.ReadOnly { + access = "ro" + } + options := []string{access} + if s.Bind != nil && s.Bind.SELinux != "" { + options = append(options, s.Bind.SELinux) + } + if s.Bind != nil && s.Bind.Propagation != "" { + options = append(options, s.Bind.Propagation) + } + if s.Volume != nil && s.Volume.NoCopy { + options = append(options, "nocopy") + } + return fmt.Sprintf("%s:%s:%s", s.Source, s.Target, strings.Join(options, ",")) +} + +const ( + // VolumeTypeBind is the type for mounting host dir + 
VolumeTypeBind = "bind" + // VolumeTypeVolume is the type for remote storage volumes + VolumeTypeVolume = "volume" + // VolumeTypeTmpfs is the type for mounting tmpfs + VolumeTypeTmpfs = "tmpfs" + // VolumeTypeNamedPipe is the type for mounting Windows named pipes + VolumeTypeNamedPipe = "npipe" + // VolumeTypeCluster is the type for mounting container storage interface (CSI) volumes + VolumeTypeCluster = "cluster" + // VolumeTypeImage is the tpe for mounting an image + VolumeTypeImage = "image" + + // SElinuxShared share the volume content + SElinuxShared = "z" + // SElinuxUnshared label content as private unshared + SElinuxUnshared = "Z" +) + +// ServiceVolumeBind are options for a service volume of type bind +type ServiceVolumeBind struct { + SELinux string `yaml:"selinux,omitempty" json:"selinux,omitempty"` + Propagation string `yaml:"propagation,omitempty" json:"propagation,omitempty"` + CreateHostPath OptOut `yaml:"create_host_path,omitempty" json:"create_host_path,omitzero"` + Recursive string `yaml:"recursive,omitempty" json:"recursive,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// OptOut is a boolean which default value is 'true' +type OptOut bool + +func (o OptOut) IsZero() bool { + // Attribute can be omitted if value is true + return bool(o) +} + +// SELinux represents the SELinux re-labeling options. +const ( + // SELinuxShared option indicates that the bind mount content is shared among multiple containers + SELinuxShared string = "z" + // SELinuxPrivate option indicates that the bind mount content is private and unshared + SELinuxPrivate string = "Z" +) + +// Propagation represents the propagation of a mount. 
+const ( + // PropagationRPrivate RPRIVATE + PropagationRPrivate string = "rprivate" + // PropagationPrivate PRIVATE + PropagationPrivate string = "private" + // PropagationRShared RSHARED + PropagationRShared string = "rshared" + // PropagationShared SHARED + PropagationShared string = "shared" + // PropagationRSlave RSLAVE + PropagationRSlave string = "rslave" + // PropagationSlave SLAVE + PropagationSlave string = "slave" +) + +// ServiceVolumeVolume are options for a service volume of type volume +type ServiceVolumeVolume struct { + Labels Mapping `yaml:"labels,omitempty" json:"labels,omitempty"` + NoCopy bool `yaml:"nocopy,omitempty" json:"nocopy,omitempty"` + Subpath string `yaml:"subpath,omitempty" json:"subpath,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// ServiceVolumeTmpfs are options for a service volume of type tmpfs +type ServiceVolumeTmpfs struct { + Size UnitBytes `yaml:"size,omitempty" json:"size,omitempty"` + + Mode uint32 `yaml:"mode,omitempty" json:"mode,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +type ServiceVolumeImage struct { + SubPath string `yaml:"subpath,omitempty" json:"subpath,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +type FileMode int64 + +// FileReferenceConfig for a reference to a swarm file object +type FileReferenceConfig struct { + Source string `yaml:"source,omitempty" json:"source,omitempty"` + Target string `yaml:"target,omitempty" json:"target,omitempty"` + UID string `yaml:"uid,omitempty" json:"uid,omitempty"` + GID string `yaml:"gid,omitempty" json:"gid,omitempty"` + Mode *FileMode `yaml:"mode,omitempty" json:"mode,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +func (f *FileMode) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case *FileMode: + return nil + case string: + i, err := strconv.ParseInt(v, 8, 64) + if 
err != nil { + return err + } + *f = FileMode(i) + case int: + *f = FileMode(v) + default: + return fmt.Errorf("unexpected value type %T for mode", value) + } + return nil +} + +// MarshalYAML makes FileMode implement yaml.Marshaller +func (f *FileMode) MarshalYAML() (interface{}, error) { + return f.String(), nil +} + +// MarshalJSON makes FileMode implement json.Marshaller +func (f *FileMode) MarshalJSON() ([]byte, error) { + return []byte("\"" + f.String() + "\""), nil +} + +func (f *FileMode) String() string { + return fmt.Sprintf("0%o", int64(*f)) +} + +// ServiceConfigObjConfig is the config obj configuration for a service +type ServiceConfigObjConfig FileReferenceConfig + +// ServiceSecretConfig is the secret configuration for a service +type ServiceSecretConfig FileReferenceConfig + +// UlimitsConfig the ulimit configuration +type UlimitsConfig struct { + Single int `yaml:"single,omitempty" json:"single,omitempty"` + Soft int `yaml:"soft,omitempty" json:"soft,omitempty"` + Hard int `yaml:"hard,omitempty" json:"hard,omitempty"` + + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +func (u *UlimitsConfig) DecodeMapstructure(value interface{}) error { + switch v := value.(type) { + case *UlimitsConfig: + // this call to DecodeMapstructure is triggered after initial value conversion as we use a map[string]*UlimitsConfig + return nil + case int: + u.Single = v + u.Soft = 0 + u.Hard = 0 + case map[string]any: + u.Single = 0 + soft, ok := v["soft"] + if ok { + u.Soft = soft.(int) + } + hard, ok := v["hard"] + if ok { + u.Hard = hard.(int) + } + default: + return fmt.Errorf("unexpected value type %T for ulimit", value) + } + return nil +} + +// MarshalYAML makes UlimitsConfig implement yaml.Marshaller +func (u *UlimitsConfig) MarshalYAML() (interface{}, error) { + if u.Single != 0 { + return u.Single, nil + } + return struct { + Soft int + Hard int + }{ + Soft: u.Soft, + Hard: u.Hard, + }, nil +} + +// MarshalJSON makes UlimitsConfig 
implement json.Marshaller +func (u *UlimitsConfig) MarshalJSON() ([]byte, error) { + if u.Single != 0 { + return json.Marshal(u.Single) + } + // Pass as a value to avoid re-entering this method and use the default implementation + return json.Marshal(*u) +} + +// NetworkConfig for a network +type NetworkConfig struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + DriverOpts Options `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + Ipam IPAMConfig `yaml:"ipam,omitempty" json:"ipam,omitempty"` + External External `yaml:"external,omitempty" json:"external,omitempty"` + Internal bool `yaml:"internal,omitempty" json:"internal,omitempty"` + Attachable bool `yaml:"attachable,omitempty" json:"attachable,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + CustomLabels Labels `yaml:"-" json:"-"` + EnableIPv4 *bool `yaml:"enable_ipv4,omitempty" json:"enable_ipv4,omitempty"` + EnableIPv6 *bool `yaml:"enable_ipv6,omitempty" json:"enable_ipv6,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// IPAMConfig for a network +type IPAMConfig struct { + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + Config []*IPAMPool `yaml:"config,omitempty" json:"config,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// IPAMPool for a network +type IPAMPool struct { + Subnet string `yaml:"subnet,omitempty" json:"subnet,omitempty"` + Gateway string `yaml:"gateway,omitempty" json:"gateway,omitempty"` + IPRange string `yaml:"ip_range,omitempty" json:"ip_range,omitempty"` + AuxiliaryAddresses Mapping `yaml:"aux_addresses,omitempty" json:"aux_addresses,omitempty"` + Extensions Extensions `yaml:",inline" json:"-"` +} + +// VolumeConfig for a volume +type VolumeConfig struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + Driver string `yaml:"driver,omitempty" 
json:"driver,omitempty"` + DriverOpts Options `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + External External `yaml:"external,omitempty" json:"external,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + CustomLabels Labels `yaml:"-" json:"-"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// External identifies a Volume or Network as a reference to a resource that is +// not managed, and should already exist. +type External bool + +// CredentialSpecConfig for credential spec on Windows +type CredentialSpecConfig struct { + Config string `yaml:"config,omitempty" json:"config,omitempty"` // Config was added in API v1.40 + File string `yaml:"file,omitempty" json:"file,omitempty"` + Registry string `yaml:"registry,omitempty" json:"registry,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +// FileObjectConfig is a config type for a file used by a service +type FileObjectConfig struct { + Name string `yaml:"name,omitempty" json:"name,omitempty"` + File string `yaml:"file,omitempty" json:"file,omitempty"` + Environment string `yaml:"environment,omitempty" json:"environment,omitempty"` + Content string `yaml:"content,omitempty" json:"content,omitempty"` + // configure marshalling to include Content - excluded by default to prevent sensitive data leaks + marshallContent bool + External External `yaml:"external,omitempty" json:"external,omitempty"` + Labels Labels `yaml:"labels,omitempty" json:"labels,omitempty"` + Driver string `yaml:"driver,omitempty" json:"driver,omitempty"` + DriverOpts map[string]string `yaml:"driver_opts,omitempty" json:"driver_opts,omitempty"` + TemplateDriver string `yaml:"template_driver,omitempty" json:"template_driver,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` +} + +const ( + // ServiceConditionCompletedSuccessfully is the type for waiting until a service has completed successfully (exit code 0). 
+ ServiceConditionCompletedSuccessfully = "service_completed_successfully" + + // ServiceConditionHealthy is the type for waiting until a service is healthy. + ServiceConditionHealthy = "service_healthy" + + // ServiceConditionStarted is the type for waiting until a service has started (default). + ServiceConditionStarted = "service_started" +) + +type DependsOnConfig map[string]ServiceDependency + +type ServiceDependency struct { + Condition string `yaml:"condition,omitempty" json:"condition,omitempty"` + Restart bool `yaml:"restart,omitempty" json:"restart,omitempty"` + Extensions Extensions `yaml:"#extensions,inline,omitempty" json:"-"` + Required bool `yaml:"required" json:"required"` +} + +type ExtendsConfig struct { + File string `yaml:"file,omitempty" json:"file,omitempty"` + Service string `yaml:"service,omitempty" json:"service,omitempty"` +} + +// SecretConfig for a secret +type SecretConfig FileObjectConfig + +// MarshalYAML makes SecretConfig implement yaml.Marshaller +func (s SecretConfig) MarshalYAML() (interface{}, error) { + // secret content is set while loading model. Never marshall it + if !s.marshallContent { + s.Content = "" + } + return FileObjectConfig(s), nil +} + +// MarshalJSON makes SecretConfig implement json.Marshaller +func (s SecretConfig) MarshalJSON() ([]byte, error) { + // secret content is set while loading model. Never marshall it + if !s.marshallContent { + s.Content = "" + } + return json.Marshal(FileObjectConfig(s)) +} + +// ConfigObjConfig is the config for the swarm "Config" object +type ConfigObjConfig FileObjectConfig + +// MarshalYAML makes ConfigObjConfig implement yaml.Marshaller +func (s ConfigObjConfig) MarshalYAML() (interface{}, error) { + // config content may have been set from environment while loading model. 
Marshall actual source + if s.Environment != "" { + s.Content = "" + } + return FileObjectConfig(s), nil +} + +// MarshalJSON makes ConfigObjConfig implement json.Marshaller +func (s ConfigObjConfig) MarshalJSON() ([]byte, error) { + // config content may have been set from environment while loading model. Marshall actual source + if s.Environment != "" { + s.Content = "" + } + return json.Marshal(FileObjectConfig(s)) +} + +type IncludeConfig struct { + Path StringList `yaml:"path,omitempty" json:"path,omitempty"` + ProjectDirectory string `yaml:"project_directory,omitempty" json:"project_directory,omitempty"` + EnvFile StringList `yaml:"env_file,omitempty" json:"env_file,omitempty"` +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go b/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go new file mode 100644 index 00000000..ed815f0f --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/utils/collectionutils.go @@ -0,0 +1,66 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package utils + +import ( + "cmp" + "maps" + "slices" +) + +func MapKeys[T cmp.Ordered, U any](theMap map[T]U) []T { + return slices.Sorted(maps.Keys(theMap)) +} + +func MapsAppend[T comparable, U any](target map[T]U, source map[T]U) map[T]U { + if target == nil { + return source + } + if source == nil { + return target + } + for key, value := range source { + if _, ok := target[key]; !ok { + target[key] = value + } + } + return target +} + +func ArrayContains[T comparable](source []T, toCheck []T) bool { + for _, value := range toCheck { + if !slices.Contains(source, value) { + return false + } + } + return true +} + +func RemoveDuplicates[T comparable](slice []T) []T { + // Create a map to store unique elements + seen := make(map[T]bool) + result := []T{} + + // Loop through the slice, adding elements to the map if they haven't been seen before + for _, val := range slice { + if _, ok := seen[val]; !ok { + seen[val] = true + result = append(result, val) + } + } + return result +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/utils/pathutils.go b/vendor/github.com/compose-spec/compose-go/v2/utils/pathutils.go new file mode 100644 index 00000000..211e2999 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/utils/pathutils.go @@ -0,0 +1,91 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package utils + +import ( + "os" + "path/filepath" + "strings" +) + +// ResolveSymbolicLink converts the section of an absolute path if it is a +// symbolic link +// +// Parameters: +// - path: an absolute path +// +// Returns: +// - converted path if it has a symbolic link or the same path if there is +// no symbolic link +func ResolveSymbolicLink(path string) (string, error) { + sym, part, err := getSymbolinkLink(path) + if err != nil { + return "", err + } + if sym == "" && part == "" { + // no symbolic link detected + return path, nil + } + return strings.Replace(path, part, sym, 1), nil +} + +// getSymbolinkLink parses all parts of the path and returns +// the symbolic link part as well as the correspondent original part +// Parameters: +// - path: an absolute path +// +// Returns: +// - string section of the path that is a symbolic link +// - string correspondent path section of the symbolic link +// - An error +func getSymbolinkLink(path string) (string, string, error) { + parts := strings.Split(path, string(os.PathSeparator)) + + // Reconstruct the path step by step, checking each component + var currentPath string + if filepath.IsAbs(path) { + currentPath = string(os.PathSeparator) + } + + for _, part := range parts { + if part == "" { + continue + } + currentPath = filepath.Join(currentPath, part) + + if isSymLink := isSymbolicLink(currentPath); isSymLink { + // return symbolic link, and correspondent part + target, err := filepath.EvalSymlinks(currentPath) + if err != nil { + return "", "", err + } + return target, currentPath, nil + } + } + return "", "", nil // no symbolic link +} + +// isSymbolicLink validates if the path is a symbolic link +func isSymbolicLink(path string) bool { + info, err := os.Lstat(path) + if err != nil { + return false + } + + // Check if the file mode indicates a symbolic link + return info.Mode()&os.ModeSymlink != 0 +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/utils/set.go 
b/vendor/github.com/compose-spec/compose-go/v2/utils/set.go new file mode 100644 index 00000000..bbbeaa96 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/utils/set.go @@ -0,0 +1,95 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +type Set[T comparable] map[T]struct{} + +func NewSet[T comparable](v ...T) Set[T] { + if len(v) == 0 { + return make(Set[T]) + } + + out := make(Set[T], len(v)) + for i := range v { + out.Add(v[i]) + } + return out +} + +func (s Set[T]) Has(v T) bool { + _, ok := s[v] + return ok +} + +func (s Set[T]) Add(v T) { + s[v] = struct{}{} +} + +func (s Set[T]) AddAll(v ...T) { + for _, e := range v { + s[e] = struct{}{} + } +} + +func (s Set[T]) Remove(v T) bool { + _, ok := s[v] + if ok { + delete(s, v) + } + return ok +} + +func (s Set[T]) Clear() { + for v := range s { + delete(s, v) + } +} + +func (s Set[T]) Elements() []T { + elements := make([]T, 0, len(s)) + for v := range s { + elements = append(elements, v) + } + return elements +} + +func (s Set[T]) RemoveAll(elements ...T) { + for _, e := range elements { + s.Remove(e) + } +} + +func (s Set[T]) Diff(other Set[T]) Set[T] { + out := make(Set[T]) + for k := range s { + if _, ok := other[k]; !ok { + out[k] = struct{}{} + } + } + return out +} + +func (s Set[T]) Union(other Set[T]) Set[T] { + out := make(Set[T]) + for k := range s { + out[k] = struct{}{} + } + for k := range other { + out[k] = struct{}{} + } 
+ return out +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go b/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go new file mode 100644 index 00000000..fc6b2035 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/utils/stringutils.go @@ -0,0 +1,50 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package utils + +import ( + "fmt" + "strconv" + "strings" +) + +// StringToBool converts a string to a boolean ignoring errors +func StringToBool(s string) bool { + b, _ := strconv.ParseBool(strings.ToLower(strings.TrimSpace(s))) + return b +} + +// GetAsEqualsMap split key=value formatted strings into a key : value map +func GetAsEqualsMap(em []string) map[string]string { + m := make(map[string]string) + for _, v := range em { + key, val, found := strings.Cut(v, "=") + if found { + m[key] = val + } + } + return m +} + +// GetAsStringList formats a key : value map into key=value strings +func GetAsStringList(em map[string]string) []string { + m := make([]string, 0, len(em)) + for k, v := range em { + m = append(m, fmt.Sprintf("%s=%s", k, v)) + } + return m +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/validation/external.go b/vendor/github.com/compose-spec/compose-go/v2/validation/external.go new file mode 100644 index 00000000..b74d551a --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/validation/external.go @@ -0,0 +1,49 @@ +/* + 
Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package validation + +import ( + "fmt" + "strings" + + "github.com/compose-spec/compose-go/v2/consts" + "github.com/compose-spec/compose-go/v2/tree" +) + +func checkExternal(v map[string]any, p tree.Path) error { + b, ok := v["external"] + if !ok { + return nil + } + if !b.(bool) { + return nil + } + + for k := range v { + switch k { + case "name", "external", consts.Extensions: + continue + default: + if strings.HasPrefix(k, "x-") { + // custom extension, ignored + continue + } + return fmt.Errorf("%s: conflicting parameters \"external\" and %q specified", p, k) + } + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go b/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go new file mode 100644 index 00000000..28cad44c --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/validation/validation.go @@ -0,0 +1,119 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package validation + +import ( + "fmt" + "net" + "strings" + + "github.com/compose-spec/compose-go/v2/tree" +) + +type checkerFunc func(value any, p tree.Path) error + +var checks = map[tree.Path]checkerFunc{ + "volumes.*": checkVolume, + "configs.*": checkFileObject("file", "environment", "content"), + "secrets.*": checkFileObject("file", "environment"), + "services.*.ports.*": checkIPAddress, + "services.*.develop.watch.*.path": checkPath, + "services.*.deploy.resources.reservations.devices.*": checkDeviceRequest, + "services.*.gpus.*": checkDeviceRequest, +} + +func Validate(dict map[string]any) error { + return check(dict, tree.NewPath()) +} + +func check(value any, p tree.Path) error { + for pattern, fn := range checks { + if p.Matches(pattern) { + return fn(value, p) + } + } + switch v := value.(type) { + case map[string]any: + for k, v := range v { + err := check(v, p.Next(k)) + if err != nil { + return err + } + } + case []any: + for _, e := range v { + err := check(e, p.Next("[]")) + if err != nil { + return err + } + } + } + return nil +} + +func checkFileObject(keys ...string) checkerFunc { + return func(value any, p tree.Path) error { + v := value.(map[string]any) + count := 0 + for _, s := range keys { + if _, ok := v[s]; ok { + count++ + } + } + if count > 1 { + return fmt.Errorf("%s: %s attributes are mutually exclusive", p, strings.Join(keys, "|")) + } + if count == 0 { + if _, ok := v["driver"]; ok { + // User specified a custom driver, which might have it's own way to set content + return nil + } + if _, ok := v["external"]; !ok { + return fmt.Errorf("%s: one of %s must be set", p, strings.Join(keys, "|")) + } + } + return nil + } +} + +func checkPath(value any, p tree.Path) error { + v := value.(string) + if v == "" { + return fmt.Errorf("%s: value can't be blank", p) + } + return nil +} + +func checkDeviceRequest(value any, p tree.Path) 
error { + v := value.(map[string]any) + _, hasCount := v["count"] + _, hasIDs := v["device_ids"] + if hasCount && hasIDs { + return fmt.Errorf(`%s: "count" and "device_ids" attributes are exclusive`, p) + } + return nil +} + +func checkIPAddress(value any, p tree.Path) error { + if v, ok := value.(map[string]any); ok { + ip, ok := v["host_ip"] + if ok && net.ParseIP(ip.(string)) == nil { + return fmt.Errorf("%s: invalid ip address: %s", p, ip) + } + } + return nil +} diff --git a/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go b/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go new file mode 100644 index 00000000..5b400681 --- /dev/null +++ b/vendor/github.com/compose-spec/compose-go/v2/validation/volume.go @@ -0,0 +1,39 @@ +/* + Copyright 2020 The Compose Specification Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/ + +package validation + +import ( + "fmt" + + "github.com/compose-spec/compose-go/v2/tree" +) + +func checkVolume(value any, p tree.Path) error { + if value == nil { + return nil + } + v, ok := value.(map[string]any) + if !ok { + return fmt.Errorf("expected volume, got %s", value) + } + + err := checkExternal(v, p) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/mattn/go-shellwords/.travis.yml b/vendor/github.com/mattn/go-shellwords/.travis.yml new file mode 100644 index 00000000..ebd5edd8 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/.travis.yml @@ -0,0 +1,16 @@ +arch: + - amd64 + - ppc64le +language: go +sudo: false +go: + - tip + +before_install: + - go get -t -v ./... + +script: + - ./go.test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/vendor/github.com/mattn/go-shellwords/LICENSE b/vendor/github.com/mattn/go-shellwords/LICENSE new file mode 100644 index 00000000..740fa931 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2017 Yasuhiro Matsumoto + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/mattn/go-shellwords/README.md b/vendor/github.com/mattn/go-shellwords/README.md new file mode 100644 index 00000000..bdd53191 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/README.md @@ -0,0 +1,55 @@ +# go-shellwords + +[![codecov](https://codecov.io/gh/mattn/go-shellwords/branch/master/graph/badge.svg)](https://codecov.io/gh/mattn/go-shellwords) +[![Build Status](https://travis-ci.org/mattn/go-shellwords.svg?branch=master)](https://travis-ci.org/mattn/go-shellwords) +[![PkgGoDev](https://pkg.go.dev/badge/github.com/mattn/go-shellwords)](https://pkg.go.dev/github.com/mattn/go-shellwords) +[![ci](https://github.com/mattn/go-shellwords/ci/badge.svg)](https://github.com/mattn/go-shellwords/actions) + +Parse line as shell words. + +## Usage + +```go +args, err := shellwords.Parse("./foo --bar=baz") +// args should be ["./foo", "--bar=baz"] +``` + +```go +envs, args, err := shellwords.ParseWithEnvs("FOO=foo BAR=baz ./foo --bar=baz") +// envs should be ["FOO=foo", "BAR=baz"] +// args should be ["./foo", "--bar=baz"] +``` + +```go +os.Setenv("FOO", "bar") +p := shellwords.NewParser() +p.ParseEnv = true +args, err := p.Parse("./foo $FOO") +// args should be ["./foo", "bar"] +``` + +```go +p := shellwords.NewParser() +p.ParseBacktick = true +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +```go +shellwords.ParseBacktick = true +p := shellwords.NewParser() +args, err := p.Parse("./foo `echo $SHELL`") +// args should be ["./foo", "/bin/bash"] +``` + +# Thanks + +This is based on cpan module [Parse::CommandLine](https://metacpan.org/pod/Parse::CommandLine). 
+ +# License + +under the MIT License: http://mattn.mit-license.org/2017 + +# Author + +Yasuhiro Matsumoto (a.k.a mattn) diff --git a/vendor/github.com/mattn/go-shellwords/go.test.sh b/vendor/github.com/mattn/go-shellwords/go.test.sh new file mode 100644 index 00000000..a7deaca9 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/go.test.sh @@ -0,0 +1,12 @@ +#!/usr/bin/env bash + +set -e +echo "" > coverage.txt + +for d in $(go list ./... | grep -v vendor); do + go test -coverprofile=profile.out -covermode=atomic "$d" + if [ -f profile.out ]; then + cat profile.out >> coverage.txt + rm profile.out + fi +done diff --git a/vendor/github.com/mattn/go-shellwords/shellwords.go b/vendor/github.com/mattn/go-shellwords/shellwords.go new file mode 100644 index 00000000..1b42a001 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/shellwords.go @@ -0,0 +1,317 @@ +package shellwords + +import ( + "bytes" + "errors" + "os" + "strings" + "unicode" +) + +var ( + ParseEnv bool = false + ParseBacktick bool = false +) + +func isSpace(r rune) bool { + switch r { + case ' ', '\t', '\r', '\n': + return true + } + return false +} + +func replaceEnv(getenv func(string) string, s string) string { + if getenv == nil { + getenv = os.Getenv + } + + var buf bytes.Buffer + rs := []rune(s) + for i := 0; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + break + } + buf.WriteRune(rs[i]) + continue + } else if r == '$' { + i++ + if i == len(rs) { + buf.WriteRune(r) + break + } + if rs[i] == 0x7b { + i++ + p := i + for ; i < len(rs); i++ { + r = rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if r == 0x7d || (!unicode.IsLetter(r) && r != '_' && !unicode.IsDigit(r)) { + break + } + } + if r != 0x7d { + return s + } + if i > p { + buf.WriteString(getenv(s[p:i])) + } + } else { + p := i + for ; i < len(rs); i++ { + r := rs[i] + if r == '\\' { + i++ + if i == len(rs) { + return s + } + continue + } + if !unicode.IsLetter(r) && r 
!= '_' && !unicode.IsDigit(r) { + break + } + } + if i > p { + buf.WriteString(getenv(s[p:i])) + i-- + } else { + buf.WriteString(s[p:]) + } + } + } else { + buf.WriteRune(r) + } + } + return buf.String() +} + +type Parser struct { + ParseEnv bool + ParseBacktick bool + Position int + Dir string + + // If ParseEnv is true, use this for getenv. + // If nil, use os.Getenv. + Getenv func(string) string +} + +func NewParser() *Parser { + return &Parser{ + ParseEnv: ParseEnv, + ParseBacktick: ParseBacktick, + Position: 0, + Dir: "", + } +} + +type argType int + +const ( + argNo argType = iota + argSingle + argQuoted +) + +func (p *Parser) Parse(line string) ([]string, error) { + args := []string{} + buf := "" + var escaped, doubleQuoted, singleQuoted, backQuote, dollarQuote bool + backtick := "" + + pos := -1 + got := argNo + + i := -1 +loop: + for _, r := range line { + i++ + if escaped { + buf += string(r) + escaped = false + got = argSingle + continue + } + + if r == '\\' { + if singleQuoted { + buf += string(r) + } else { + escaped = true + } + continue + } + + if isSpace(r) { + if singleQuoted || doubleQuoted || backQuote || dollarQuote { + buf += string(r) + backtick += string(r) + } else if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + buf = "" + got = argNo + } + continue + } + + switch r { + case '`': + if !singleQuoted && !doubleQuoted && !dollarQuote { + if p.ParseBacktick { + if backQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)] + out + } + backtick = "" + backQuote = !backQuote + continue + } + backtick = "" + backQuote = !backQuote + } + case ')': + if !singleQuoted && !doubleQuoted && !backQuote { + if p.ParseBacktick { + if dollarQuote { + out, err := shellRun(backtick, p.Dir) + if err != nil { + return nil, err + } + buf = buf[:len(buf)-len(backtick)-2] + out + } + backtick = "" + dollarQuote = !dollarQuote + continue + } + backtick = "" + dollarQuote = !dollarQuote + } + case '(': + if !singleQuoted && !doubleQuoted && !backQuote { + if !dollarQuote && strings.HasSuffix(buf, "$") { + dollarQuote = true + buf += "(" + continue + } else { + return nil, errors.New("invalid command line string") + } + } + case '"': + if !singleQuoted && !dollarQuote { + if doubleQuoted { + got = argQuoted + } + doubleQuoted = !doubleQuoted + continue + } + case '\'': + if !doubleQuoted && !dollarQuote { + if singleQuoted { + got = argQuoted + } + singleQuoted = !singleQuoted + continue + } + case ';', '&', '|', '<', '>': + if !(escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote) { + if r == '>' && len(buf) > 0 { + if c := buf[0]; '0' <= c && c <= '9' { + i -= 1 + got = argNo + } + } + pos = i + break loop + } + } + + got = argSingle + buf += string(r) + if backQuote || dollarQuote { + backtick += string(r) + } + } + + if got != argNo { + if p.ParseEnv { + if got == argSingle { + parser := &Parser{ParseEnv: false, ParseBacktick: false, Position: 0, Dir: p.Dir} + strs, err := parser.Parse(replaceEnv(p.Getenv, buf)) + if err != nil { + return nil, err + } + args = append(args, strs...) 
+ } else { + args = append(args, replaceEnv(p.Getenv, buf)) + } + } else { + args = append(args, buf) + } + } + + if escaped || singleQuoted || doubleQuoted || backQuote || dollarQuote { + return nil, errors.New("invalid command line string") + } + + p.Position = pos + + return args, nil +} + +func (p *Parser) ParseWithEnvs(line string) (envs []string, args []string, err error) { + _args, err := p.Parse(line) + if err != nil { + return nil, nil, err + } + envs = []string{} + args = []string{} + parsingEnv := true + for _, arg := range _args { + if parsingEnv && isEnv(arg) { + envs = append(envs, arg) + } else { + if parsingEnv { + parsingEnv = false + } + args = append(args, arg) + } + } + return envs, args, nil +} + +func isEnv(arg string) bool { + return len(strings.Split(arg, "=")) == 2 +} + +func Parse(line string) ([]string, error) { + return NewParser().Parse(line) +} + +func ParseWithEnvs(line string) (envs []string, args []string, err error) { + return NewParser().ParseWithEnvs(line) +} diff --git a/vendor/github.com/mattn/go-shellwords/util_posix.go b/vendor/github.com/mattn/go-shellwords/util_posix.go new file mode 100644 index 00000000..b56a9012 --- /dev/null +++ b/vendor/github.com/mattn/go-shellwords/util_posix.go @@ -0,0 +1,29 @@ +// +build !windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("SHELL"); shell == "" { + shell = "/bin/sh" + } + cmd := exec.Command(shell, "-c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/mattn/go-shellwords/util_windows.go b/vendor/github.com/mattn/go-shellwords/util_windows.go new file mode 100644 index 00000000..fd738a72 --- /dev/null +++ 
b/vendor/github.com/mattn/go-shellwords/util_windows.go @@ -0,0 +1,29 @@ +// +build windows + +package shellwords + +import ( + "fmt" + "os" + "os/exec" + "strings" +) + +func shellRun(line, dir string) (string, error) { + var shell string + if shell = os.Getenv("COMSPEC"); shell == "" { + shell = "cmd" + } + cmd := exec.Command(shell, "/c", line) + if dir != "" { + cmd.Dir = dir + } + b, err := cmd.Output() + if err != nil { + if eerr, ok := err.(*exec.ExitError); ok { + b = eerr.Stderr + } + return "", fmt.Errorf("%s: %w", string(b), err) + } + return strings.TrimSpace(string(b)), nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules new file mode 100644 index 00000000..d14f5ea7 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.gitmodules @@ -0,0 +1,4 @@ +[submodule "testdata/JSON-Schema-Test-Suite"] + path = testdata/JSON-Schema-Test-Suite + url = https://github.com/json-schema-org/JSON-Schema-Test-Suite.git + branch = main diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml new file mode 100644 index 00000000..b3cd1749 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.golangci.yml @@ -0,0 +1,5 @@ +linters: + enable: + - nakedret + - errname + - godot diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml new file mode 100644 index 00000000..695b502e --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/.pre-commit-hooks.yaml @@ -0,0 +1,7 @@ +- id: jsonschema-validate + name: Validate JSON against JSON Schema + description: ensure json files follow specified JSON Schema + entry: jv + language: golang + additional_dependencies: + - ./cmd/jv diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE new file mode 100644 index 00000000..19dc35b2 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/LICENSE @@ -0,0 +1,175 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
\ No newline at end of file diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md b/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md new file mode 100644 index 00000000..0831d7f5 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/README.md @@ -0,0 +1,86 @@ +# jsonschema v6.0.0 + +[![License](https://img.shields.io/badge/License-Apache%202.0-blue.svg)](https://opensource.org/licenses/Apache-2.0) +[![GoDoc](https://godoc.org/github.com/santhosh-tekuri/jsonschema?status.svg)](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) +[![Go Report Card](https://goreportcard.com/badge/github.com/santhosh-tekuri/jsonschema/v6)](https://goreportcard.com/report/github.com/santhosh-tekuri/jsonschema/v6) +[![Build Status](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml/badge.svg?branch=boon)](https://github.com/santhosh-tekuri/jsonschema/actions/workflows/go.yaml) +[![codecov](https://codecov.io/gh/santhosh-tekuri/jsonschema/branch/boon/graph/badge.svg?token=JMVj1pFT2l)](https://codecov.io/gh/santhosh-tekuri/jsonschema/tree/boon) + +see [godoc](https://pkg.go.dev/github.com/santhosh-tekuri/jsonschema/v6) for examples + +## Library Features + +- [x] pass [JSON-Schema-Test-Suite](https://github.com/json-schema-org/JSON-Schema-Test-Suite) excluding optional(compare with other impls at [bowtie](https://bowtie-json-schema.github.io/bowtie/#)) + - [x] [![draft-04](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft4.json)](https://bowtie.report/#/dialects/draft4) + - [x] [![draft-06](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft6.json)](https://bowtie.report/#/dialects/draft6) + - [x] [![draft-07](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft7.json)](https://bowtie.report/#/dialects/draft7) + - [x] 
[![draft/2019-09](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2019-09.json)](https://bowtie.report/#/dialects/draft2019-09) + - [x] [![draft/2020-12](https://img.shields.io/endpoint?url=https://bowtie.report/badges/go-jsonschema/compliance/draft2020-12.json)](https://bowtie.report/#/dialects/draft2020-12) +- [x] detect infinite loop traps + - [x] `$schema` cycle + - [x] validation cycle +- [x] custom `$schema` url +- [x] vocabulary based validation +- [x] custom regex engine +- [x] format assertions + - [x] flag to enable in draft >= 2019-09 + - [x] custom format registration + - [x] built-in formats + - [x] regex, uuid + - [x] ipv4, ipv6 + - [x] hostname, email + - [x] date, time, date-time, duration + - [x] json-pointer, relative-json-pointer + - [x] uri, uri-reference, uri-template + - [x] iri, iri-reference + - [x] period, semver +- [x] content assertions + - [x] flag to enable in draft >= 7 + - [x] contentEncoding + - [x] base64 + - [x] custom + - [x] contentMediaType + - [x] application/json + - [x] custom + - [x] contentSchema +- [x] errors + - [x] introspectable + - [x] hierarchy + - [x] alternative display with `#` + - [x] output + - [x] flag + - [x] basic + - [x] detailed +- [x] custom vocabulary + - enable via `$vocabulary` for draft >=2019-19 + - enable via flag for draft <= 7 +- [x] mixed dialect support + +## CLI + +to install: `go install github.com/santhosh-tekuri/jsonschema/cmd/jv@latest` + +``` +Usage: jv [OPTIONS] SCHEMA [INSTANCE...] + +Options: + -c, --assert-content Enable content assertions with draft >= 7 + -f, --assert-format Enable format assertions with draft >= 2019 + --cacert pem-file Use the specified pem-file to verify the peer. The file may contain multiple CA certificates + -d, --draft version Draft version used when '$schema' is missing. 
Valid values 4, 6, 7, 2019, 2020 (default 2020) + -h, --help Print help information + -k, --insecure Use insecure TLS connection + -o, --output format Output format. Valid values simple, alt, flag, basic, detailed (default "simple") + -q, --quiet Do not print errors + -v, --version Print build information +``` + +- [x] exit code `1` for validation erros, `2` for usage errors +- [x] validate both schema and multiple instances +- [x] support both json and yaml files +- [x] support standard input, use `-` +- [x] quite mode with parsable output +- [x] http(s) url support + - [x] custom certs for validation, use `--cacert` + - [x] flag to skip certificate verification, use `--insecure` + diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go new file mode 100644 index 00000000..4da73610 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/compiler.go @@ -0,0 +1,332 @@ +package jsonschema + +import ( + "fmt" + "regexp" + "slices" +) + +// Compiler compiles json schema into *Schema. +type Compiler struct { + schemas map[urlPtr]*Schema + roots *roots + formats map[string]*Format + decoders map[string]*Decoder + mediaTypes map[string]*MediaType + assertFormat bool + assertContent bool +} + +// NewCompiler create Compiler Object. +func NewCompiler() *Compiler { + return &Compiler{ + schemas: map[urlPtr]*Schema{}, + roots: newRoots(), + formats: map[string]*Format{}, + decoders: map[string]*Decoder{}, + mediaTypes: map[string]*MediaType{}, + assertFormat: false, + assertContent: false, + } +} + +// DefaultDraft overrides the draft used to +// compile schemas without `$schema` field. +// +// By default, this library uses the latest +// draft supported. +// +// The use of this option is HIGHLY encouraged +// to ensure continued correct operation of your +// schema. The current default value will not stay +// the same overtime. 
+func (c *Compiler) DefaultDraft(d *Draft) { + c.roots.defaultDraft = d +} + +// AssertFormat always enables format assertions. +// +// Default Behavior: +// for draft-07: enabled. +// for draft/2019-09: disabled unless metaschema says `format` vocabulary is required. +// for draft/2020-12: disabled unless metaschema says `format-assertion` vocabulary is required. +func (c *Compiler) AssertFormat() { + c.assertFormat = true +} + +// AssertContent enables content assertions. +// +// Content assertions include keywords: +// - contentEncoding +// - contentMediaType +// - contentSchema +// +// Default behavior is always disabled. +func (c *Compiler) AssertContent() { + c.assertContent = true +} + +// RegisterFormat registers custom format. +// +// NOTE: +// - "regex" format can not be overridden +// - format assertions are disabled for draft >= 2019-09 +// see [Compiler.AssertFormat] +func (c *Compiler) RegisterFormat(f *Format) { + if f.Name != "regex" { + c.formats[f.Name] = f + } +} + +// RegisterContentEncoding registers custom contentEncoding. +// +// NOTE: content assertions are disabled by default. +// see [Compiler.AssertContent]. +func (c *Compiler) RegisterContentEncoding(d *Decoder) { + c.decoders[d.Name] = d +} + +// RegisterContentMediaType registers custom contentMediaType. +// +// NOTE: content assertions are disabled by default. +// see [Compiler.AssertContent]. +func (c *Compiler) RegisterContentMediaType(mt *MediaType) { + c.mediaTypes[mt.Name] = mt +} + +// RegisterVocabulary registers custom vocabulary. +// +// NOTE: +// - vocabularies are disabled for draft >= 2019-09 +// see [Compiler.AssertVocabs] +func (c *Compiler) RegisterVocabulary(vocab *Vocabulary) { + c.roots.vocabularies[vocab.URL] = vocab +} + +// AssertVocabs always enables user-defined vocabularies assertions. +// +// Default Behavior: +// for draft-07: enabled. +// for draft/2019-09: disabled unless metaschema enables a vocabulary. 
+// for draft/2020-12: disabled unless metaschema enables a vocabulary. +func (c *Compiler) AssertVocabs() { + c.roots.assertVocabs = true +} + +// AddResource adds schema resource which gets used later in reference +// resolution. +// +// The argument url can be file path or url. Any fragment in url is ignored. +// The argument doc must be valid json value. +func (c *Compiler) AddResource(url string, doc any) error { + uf, err := absolute(url) + if err != nil { + return err + } + if isMeta(string(uf.url)) { + return &ResourceExistsError{string(uf.url)} + } + if !c.roots.loader.add(uf.url, doc) { + return &ResourceExistsError{string(uf.url)} + } + return nil +} + +// UseLoader overrides the default [URLLoader] used +// to load schema resources. +func (c *Compiler) UseLoader(loader URLLoader) { + c.roots.loader.loader = loader +} + +// UseRegexpEngine changes the regexp-engine used. +// By default it uses regexp package from go standard +// library. +// +// NOTE: must be called before compiling any schemas. +func (c *Compiler) UseRegexpEngine(engine RegexpEngine) { + if engine == nil { + engine = goRegexpCompile + } + c.roots.regexpEngine = engine +} + +func (c *Compiler) enqueue(q *queue, up urlPtr) *Schema { + if sch, ok := c.schemas[up]; ok { + // already got compiled + return sch + } + if sch := q.get(up); sch != nil { + return sch + } + sch := newSchema(up) + q.append(sch) + return sch +} + +// MustCompile is like [Compile] but panics if compilation fails. +// It simplifies safe initialization of global variables holding +// compiled schema. +func (c *Compiler) MustCompile(loc string) *Schema { + sch, err := c.Compile(loc) + if err != nil { + panic(fmt.Sprintf("jsonschema: Compile(%q): %v", loc, err)) + } + return sch +} + +// Compile compiles json-schema at given loc. 
+func (c *Compiler) Compile(loc string) (*Schema, error) { + uf, err := absolute(loc) + if err != nil { + return nil, err + } + up, err := c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.doCompile(up) +} + +func (c *Compiler) doCompile(up urlPtr) (*Schema, error) { + q := &queue{} + compiled := 0 + + c.enqueue(q, up) + for q.len() > compiled { + sch := q.at(compiled) + if err := c.roots.ensureSubschema(sch.up); err != nil { + return nil, err + } + r := c.roots.roots[sch.up.url] + v, err := sch.up.lookup(r.doc) + if err != nil { + return nil, err + } + if err := c.compileValue(v, sch, r, q); err != nil { + return nil, err + } + compiled++ + } + for _, sch := range *q { + c.schemas[sch.up] = sch + } + return c.schemas[up], nil +} + +func (c *Compiler) compileValue(v any, sch *Schema, r *root, q *queue) error { + res := r.resource(sch.up.ptr) + sch.DraftVersion = res.dialect.draft.version + + base := urlPtr{sch.up.url, res.ptr} + sch.resource = c.enqueue(q, base) + + // if resource, enqueue dynamic anchors for compilation + if sch.DraftVersion >= 2020 && sch.up == sch.resource.up { + res := r.resource(sch.up.ptr) + for anchor, anchorPtr := range res.anchors { + if slices.Contains(res.dynamicAnchors, anchor) { + up := urlPtr{sch.up.url, anchorPtr} + danchorSch := c.enqueue(q, up) + if sch.dynamicAnchors == nil { + sch.dynamicAnchors = map[string]*Schema{} + } + sch.dynamicAnchors[string(anchor)] = danchorSch + } + } + } + + switch v := v.(type) { + case bool: + sch.Bool = &v + case map[string]any: + if err := c.compileObject(v, sch, r, q); err != nil { + return err + } + } + + sch.allPropsEvaluated = sch.AdditionalProperties != nil + if sch.DraftVersion < 2020 { + sch.allItemsEvaluated = sch.AdditionalItems != nil + switch items := sch.Items.(type) { + case *Schema: + sch.allItemsEvaluated = true + case []*Schema: + sch.numItemsEvaluated = len(items) + } + } else { + sch.allItemsEvaluated = sch.Items2020 != nil + sch.numItemsEvaluated = 
len(sch.PrefixItems) + } + + return nil +} + +func (c *Compiler) compileObject(obj map[string]any, sch *Schema, r *root, q *queue) error { + if len(obj) == 0 { + b := true + sch.Bool = &b + return nil + } + oc := objCompiler{ + c: c, + obj: obj, + up: sch.up, + r: r, + res: r.resource(sch.up.ptr), + q: q, + } + return oc.compile(sch) +} + +// queue -- + +type queue []*Schema + +func (q *queue) append(sch *Schema) { + *q = append(*q, sch) +} + +func (q *queue) at(i int) *Schema { + return (*q)[i] +} + +func (q *queue) len() int { + return len(*q) +} + +func (q *queue) get(up urlPtr) *Schema { + i := slices.IndexFunc(*q, func(sch *Schema) bool { return sch.up == up }) + if i != -1 { + return (*q)[i] + } + return nil +} + +// regexp -- + +// Regexp is the representation of compiled regular expression. +type Regexp interface { + fmt.Stringer + + // MatchString reports whether the string s contains + // any match of the regular expression. + MatchString(string) bool +} + +// RegexpEngine parses a regular expression and returns, +// if successful, a Regexp object that can be used to +// match against text. +type RegexpEngine func(string) (Regexp, error) + +func (re RegexpEngine) validate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + _, err := re(s) + return err +} + +func goRegexpCompile(s string) (Regexp, error) { + return regexp.Compile(s) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go new file mode 100644 index 00000000..8d62e58b --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/content.go @@ -0,0 +1,51 @@ +package jsonschema + +import ( + "bytes" + "encoding/base64" + "encoding/json" +) + +// Decoder specifies how to decode specific contentEncoding. +type Decoder struct { + // Name of contentEncoding. + Name string + // Decode given string to byte array. 
+ Decode func(string) ([]byte, error) +} + +var decoders = map[string]*Decoder{ + "base64": { + Name: "base64", + Decode: func(s string) ([]byte, error) { + return base64.StdEncoding.DecodeString(s) + }, + }, +} + +// MediaType specified how to validate bytes against specific contentMediaType. +type MediaType struct { + // Name of contentMediaType. + Name string + + // Validate checks whether bytes conform to this mediatype. + Validate func([]byte) error + + // UnmarshalJSON unmarshals bytes into json value. + // This must be nil if this mediatype is not compatible + // with json. + UnmarshalJSON func([]byte) (any, error) +} + +var mediaTypes = map[string]*MediaType{ + "application/json": { + Name: "application/json", + Validate: func(b []byte) error { + var v any + return json.Unmarshal(b, &v) + }, + UnmarshalJSON: func(b []byte) (any, error) { + return UnmarshalJSON(bytes.NewReader(b)) + }, + }, +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go new file mode 100644 index 00000000..fd09bae8 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/draft.go @@ -0,0 +1,360 @@ +package jsonschema + +import ( + "fmt" + "slices" + "strings" +) + +// A Draft represents json-schema specification. +type Draft struct { + version int + url string + sch *Schema + id string // property name used to represent id + subschemas []SchemaPath // locations of subschemas + vocabPrefix string // prefix used for vocabulary + allVocabs map[string]*Schema // names of supported vocabs with its schemas + defaultVocabs []string // names of default vocabs +} + +// String returns the specification url. 
+func (d *Draft) String() string { + return d.url +} + +var ( + Draft4 = &Draft{ + version: 4, + url: "http://json-schema.org/draft-04/schema", + id: "id", + subschemas: []SchemaPath{ + // type agonistic + schemaPath("definitions/*"), + schemaPath("not"), + schemaPath("allOf/[]"), + schemaPath("anyOf/[]"), + schemaPath("oneOf/[]"), + // object + schemaPath("properties/*"), + schemaPath("additionalProperties"), + schemaPath("patternProperties/*"), + // array + schemaPath("items"), + schemaPath("items/[]"), + schemaPath("additionalItems"), + schemaPath("dependencies/*"), + }, + vocabPrefix: "", + allVocabs: map[string]*Schema{}, + defaultVocabs: []string{}, + } + + Draft6 = &Draft{ + version: 6, + url: "http://json-schema.org/draft-06/schema", + id: "$id", + subschemas: joinSubschemas(Draft4.subschemas, + schemaPath("propertyNames"), + schemaPath("contains"), + ), + vocabPrefix: "", + allVocabs: map[string]*Schema{}, + defaultVocabs: []string{}, + } + + Draft7 = &Draft{ + version: 7, + url: "http://json-schema.org/draft-07/schema", + id: "$id", + subschemas: joinSubschemas(Draft6.subschemas, + schemaPath("if"), + schemaPath("then"), + schemaPath("else"), + ), + vocabPrefix: "", + allVocabs: map[string]*Schema{}, + defaultVocabs: []string{}, + } + + Draft2019 = &Draft{ + version: 2019, + url: "https://json-schema.org/draft/2019-09/schema", + id: "$id", + subschemas: joinSubschemas(Draft7.subschemas, + schemaPath("$defs/*"), + schemaPath("dependentSchemas/*"), + schemaPath("unevaluatedProperties"), + schemaPath("unevaluatedItems"), + schemaPath("contentSchema"), + ), + vocabPrefix: "https://json-schema.org/draft/2019-09/vocab/", + allVocabs: map[string]*Schema{ + "core": nil, + "applicator": nil, + "validation": nil, + "meta-data": nil, + "format": nil, + "content": nil, + }, + defaultVocabs: []string{"core", "applicator", "validation"}, + } + + Draft2020 = &Draft{ + version: 2020, + url: "https://json-schema.org/draft/2020-12/schema", + id: "$id", + subschemas: 
joinSubschemas(Draft2019.subschemas, + schemaPath("prefixItems/[]"), + ), + vocabPrefix: "https://json-schema.org/draft/2020-12/vocab/", + allVocabs: map[string]*Schema{ + "core": nil, + "applicator": nil, + "unevaluated": nil, + "validation": nil, + "meta-data": nil, + "format-annotation": nil, + "format-assertion": nil, + "content": nil, + }, + defaultVocabs: []string{"core", "applicator", "unevaluated", "validation"}, + } + + draftLatest = Draft2020 +) + +func init() { + c := NewCompiler() + c.AssertFormat() + for _, d := range []*Draft{Draft4, Draft6, Draft7, Draft2019, Draft2020} { + d.sch = c.MustCompile(d.url) + for name := range d.allVocabs { + d.allVocabs[name] = c.MustCompile(strings.TrimSuffix(d.url, "schema") + "meta/" + name) + } + } +} + +func draftFromURL(url string) *Draft { + u, frag := split(url) + if frag != "" { + return nil + } + u, ok := strings.CutPrefix(u, "http://") + if !ok { + u, _ = strings.CutPrefix(u, "https://") + } + switch u { + case "json-schema.org/schema": + return draftLatest + case "json-schema.org/draft/2020-12/schema": + return Draft2020 + case "json-schema.org/draft/2019-09/schema": + return Draft2019 + case "json-schema.org/draft-07/schema": + return Draft7 + case "json-schema.org/draft-06/schema": + return Draft6 + case "json-schema.org/draft-04/schema": + return Draft4 + default: + return nil + } +} + +func (d *Draft) getID(obj map[string]any) string { + if d.version < 2019 { + if _, ok := obj["$ref"]; ok { + // All other properties in a "$ref" object MUST be ignored + return "" + } + } + + id, ok := strVal(obj, d.id) + if !ok { + return "" + } + id, _ = split(id) // ignore fragment + return id +} + +func (d *Draft) getVocabs(url url, doc any, vocabularies map[string]*Vocabulary) ([]string, error) { + if d.version < 2019 { + return nil, nil + } + obj, ok := doc.(map[string]any) + if !ok { + return nil, nil + } + v, ok := obj["$vocabulary"] + if !ok { + return nil, nil + } + obj, ok = v.(map[string]any) + if !ok { + return 
nil, nil + } + + var vocabs []string + for vocab, reqd := range obj { + if reqd, ok := reqd.(bool); !ok || !reqd { + continue + } + name, ok := strings.CutPrefix(vocab, d.vocabPrefix) + if ok { + if _, ok := d.allVocabs[name]; ok { + if !slices.Contains(vocabs, name) { + vocabs = append(vocabs, name) + continue + } + } + } + if _, ok := vocabularies[vocab]; !ok { + return nil, &UnsupportedVocabularyError{url.String(), vocab} + } + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + if !slices.Contains(vocabs, "core") { + vocabs = append(vocabs, "core") + } + return vocabs, nil +} + +// -- + +type dialect struct { + draft *Draft + vocabs []string // nil means use draft.defaultVocabs +} + +func (d *dialect) hasVocab(name string) bool { + if name == "core" || d.draft.version < 2019 { + return true + } + if d.vocabs != nil { + return slices.Contains(d.vocabs, name) + } + return slices.Contains(d.draft.defaultVocabs, name) +} + +func (d *dialect) activeVocabs(assertVocabs bool, vocabularies map[string]*Vocabulary) []string { + if len(vocabularies) == 0 { + return d.vocabs + } + if d.draft.version < 2019 { + assertVocabs = true + } + if !assertVocabs { + return d.vocabs + } + var vocabs []string + if d.vocabs == nil { + vocabs = slices.Clone(d.draft.defaultVocabs) + } else { + vocabs = slices.Clone(d.vocabs) + } + for vocab := range vocabularies { + if !slices.Contains(vocabs, vocab) { + vocabs = append(vocabs, vocab) + } + } + return vocabs +} + +func (d *dialect) getSchema(assertVocabs bool, vocabularies map[string]*Vocabulary) *Schema { + vocabs := d.activeVocabs(assertVocabs, vocabularies) + if vocabs == nil { + return d.draft.sch + } + + var allOf []*Schema + for _, vocab := range vocabs { + sch := d.draft.allVocabs[vocab] + if sch == nil { + if v, ok := vocabularies[vocab]; ok { + sch = v.Schema + } + } + if sch != nil { + allOf = append(allOf, sch) + } + } + if !slices.Contains(vocabs, "core") { + sch := d.draft.allVocabs["core"] + if 
sch == nil { + sch = d.draft.sch + } + allOf = append(allOf, sch) + } + sch := &Schema{ + Location: "urn:mem:metaschema", + up: urlPtr{url("urn:mem:metaschema"), ""}, + DraftVersion: d.draft.version, + AllOf: allOf, + } + sch.resource = sch + if sch.DraftVersion >= 2020 { + sch.DynamicAnchor = "meta" + sch.dynamicAnchors = map[string]*Schema{ + "meta": sch, + } + } + return sch +} + +// -- + +type ParseIDError struct { + URL string +} + +func (e *ParseIDError) Error() string { + return fmt.Sprintf("error in parsing id at %q", e.URL) +} + +// -- + +type ParseAnchorError struct { + URL string +} + +func (e *ParseAnchorError) Error() string { + return fmt.Sprintf("error in parsing anchor at %q", e.URL) +} + +// -- + +type DuplicateIDError struct { + ID string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateIDError) Error() string { + return fmt.Sprintf("duplicate id %q in %q at %q and %q", e.ID, e.URL, e.Ptr1, e.Ptr2) +} + +// -- + +type DuplicateAnchorError struct { + Anchor string + URL string + Ptr1 string + Ptr2 string +} + +func (e *DuplicateAnchorError) Error() string { + return fmt.Sprintf("duplicate anchor %q in %q at %q and %q", e.Anchor, e.URL, e.Ptr1, e.Ptr2) +} + +// -- + +func joinSubschemas(a1 []SchemaPath, a2 ...SchemaPath) []SchemaPath { + var a []SchemaPath + a = append(a, a1...) + a = append(a, a2...) + return a +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go new file mode 100644 index 00000000..b78b22e2 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/format.go @@ -0,0 +1,708 @@ +package jsonschema + +import ( + "net/netip" + gourl "net/url" + "strconv" + "strings" + "time" +) + +// Format defined specific format. +type Format struct { + // Name of format. + Name string + + // Validate checks if given value is of this format. 
+ Validate func(v any) error +} + +var formats = map[string]*Format{ + "json-pointer": {"json-pointer", validateJSONPointer}, + "relative-json-pointer": {"relative-json-pointer", validateRelativeJSONPointer}, + "uuid": {"uuid", validateUUID}, + "duration": {"duration", validateDuration}, + "period": {"period", validatePeriod}, + "ipv4": {"ipv4", validateIPV4}, + "ipv6": {"ipv6", validateIPV6}, + "hostname": {"hostname", validateHostname}, + "email": {"email", validateEmail}, + "date": {"date", validateDate}, + "time": {"time", validateTime}, + "date-time": {"date-time", validateDateTime}, + "uri": {"uri", validateURI}, + "iri": {"iri", validateURI}, + "uri-reference": {"uri-reference", validateURIReference}, + "iri-reference": {"iri-reference", validateURIReference}, + "uri-template": {"uri-template", validateURITemplate}, + "semver": {"semver", validateSemver}, +} + +// see https://www.rfc-editor.org/rfc/rfc6901#section-3 +func validateJSONPointer(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if s == "" { + return nil + } + if !strings.HasPrefix(s, "/") { + return LocalizableError("not starting with /") + } + for _, tok := range strings.Split(s, "/")[1:] { + escape := false + for _, ch := range tok { + if escape { + escape = false + if ch != '0' && ch != '1' { + return LocalizableError("~ must be followed by 0 or 1") + } + continue + } + if ch == '~' { + escape = true + continue + } + switch { + case ch >= '\x00' && ch <= '\x2E': + case ch >= '\x30' && ch <= '\x7D': + case ch >= '\x7F' && ch <= '\U0010FFFF': + default: + return LocalizableError("invalid character %q", ch) + } + } + if escape { + return LocalizableError("~ must be followed by 0 or 1") + } + } + return nil +} + +// see https://tools.ietf.org/html/draft-handrews-relative-json-pointer-01#section-3 +func validateRelativeJSONPointer(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // start with non-negative-integer + numDigits := 0 + for _, ch := range s { + if 
ch >= '0' && ch <= '9' { + numDigits++ + } else { + break + } + } + if numDigits == 0 { + return LocalizableError("must start with non-negative integer") + } + if numDigits > 1 && strings.HasPrefix(s, "0") { + return LocalizableError("starts with zero") + } + s = s[numDigits:] + + // followed by either json-pointer or '#' + if s == "#" { + return nil + } + return validateJSONPointer(s) +} + +// see https://datatracker.ietf.org/doc/html/rfc4122#page-4 +func validateUUID(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + hexGroups := []int{8, 4, 4, 4, 12} + groups := strings.Split(s, "-") + if len(groups) != len(hexGroups) { + return LocalizableError("must have %d elements", len(hexGroups)) + } + for i, group := range groups { + if len(group) != hexGroups[i] { + return LocalizableError("element %d must be %d characters long", i+1, hexGroups[i]) + } + for _, ch := range group { + switch { + case ch >= '0' && ch <= '9': + case ch >= 'a' && ch <= 'f': + case ch >= 'A' && ch <= 'F': + default: + return LocalizableError("non-hex character %q", ch) + } + } + } + return nil +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#appendix-A +func validateDuration(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // must start with 'P' + s, ok = strings.CutPrefix(s, "P") + if !ok { + return LocalizableError("must start with P") + } + if s == "" { + return LocalizableError("nothing after P") + } + + // dur-week + if s, ok := strings.CutSuffix(s, "W"); ok { + if s == "" { + return LocalizableError("no number in week") + } + for _, ch := range s { + if ch < '0' || ch > '9' { + return LocalizableError("invalid week") + } + } + return nil + } + + allUnits := []string{"YMD", "HMS"} + for i, s := range strings.Split(s, "T") { + if i != 0 && s == "" { + return LocalizableError("no time elements") + } + if i >= len(allUnits) { + return LocalizableError("more than one T") + } + units := allUnits[i] + for s != "" { + digitCount := 0 + for _, ch := 
range s { + if ch >= '0' && ch <= '9' { + digitCount++ + } else { + break + } + } + if digitCount == 0 { + return LocalizableError("missing number") + } + s = s[digitCount:] + if s == "" { + return LocalizableError("missing unit") + } + unit := s[0] + j := strings.IndexByte(units, unit) + if j == -1 { + if strings.IndexByte(allUnits[i], unit) != -1 { + return LocalizableError("unit %q out of order", unit) + } + return LocalizableError("invalid unit %q", unit) + } + units = units[j+1:] + s = s[1:] + } + } + + return nil +} + +func validateIPV4(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + groups := strings.Split(s, ".") + if len(groups) != 4 { + return LocalizableError("expected four decimals") + } + for _, group := range groups { + if len(group) > 1 && group[0] == '0' { + return LocalizableError("leading zeros") + } + n, err := strconv.Atoi(group) + if err != nil { + return err + } + if n < 0 || n > 255 { + return LocalizableError("decimal must be between 0 and 255") + } + } + return nil +} + +func validateIPV6(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if !strings.Contains(s, ":") { + return LocalizableError("missing colon") + } + addr, err := netip.ParseAddr(s) + if err != nil { + return err + } + if addr.Zone() != "" { + return LocalizableError("zone id is not a part of ipv6 address") + } + return nil +} + +// see https://en.wikipedia.org/wiki/Hostname#Restrictions_on_valid_host_names +func validateHostname(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // entire hostname (including the delimiting dots but not a trailing dot) has a maximum of 253 ASCII characters + s = strings.TrimSuffix(s, ".") + if len(s) > 253 { + return LocalizableError("more than 253 characters long") + } + + // Hostnames are composed of series of labels concatenated with dots, as are all domain names + for _, label := range strings.Split(s, ".") { + // Each label must be from 1 to 63 characters long + if len(label) < 1 || 
len(label) > 63 { + return LocalizableError("label must be 1 to 63 characters long") + } + + // labels must not start or end with a hyphen + if strings.HasPrefix(label, "-") { + return LocalizableError("label starts with hyphen") + } + if strings.HasSuffix(label, "-") { + return LocalizableError("label ends with hyphen") + } + + // labels may contain only the ASCII letters 'a' through 'z' (in a case-insensitive manner), + // the digits '0' through '9', and the hyphen ('-') + for _, ch := range label { + switch { + case ch >= 'a' && ch <= 'z': + case ch >= 'A' && ch <= 'Z': + case ch >= '0' && ch <= '9': + case ch == '-': + default: + return LocalizableError("invalid character %q", ch) + } + } + } + return nil +} + +// see https://en.wikipedia.org/wiki/Email_address +func validateEmail(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + // entire email address to be no more than 254 characters long + if len(s) > 254 { + return LocalizableError("more than 255 characters long") + } + + // email address is generally recognized as having two parts joined with an at-sign + at := strings.LastIndexByte(s, '@') + if at == -1 { + return LocalizableError("missing @") + } + local, domain := s[:at], s[at+1:] + + // local part may be up to 64 characters long + if len(local) > 64 { + return LocalizableError("local part more than 64 characters long") + } + + if len(local) > 1 && strings.HasPrefix(local, `"`) && strings.HasPrefix(local, `"`) { + // quoted + local := local[1 : len(local)-1] + if strings.IndexByte(local, '\\') != -1 || strings.IndexByte(local, '"') != -1 { + return LocalizableError("backslash and quote are not allowed within quoted local part") + } + } else { + // unquoted + if strings.HasPrefix(local, ".") { + return LocalizableError("starts with dot") + } + if strings.HasSuffix(local, ".") { + return LocalizableError("ends with dot") + } + + // consecutive dots not allowed + if strings.Contains(local, "..") { + return LocalizableError("consecutive 
dots") + } + + // check allowed chars + for _, ch := range local { + switch { + case ch >= 'a' && ch <= 'z': + case ch >= 'A' && ch <= 'Z': + case ch >= '0' && ch <= '9': + case strings.ContainsRune(".!#$%&'*+-/=?^_`{|}~", ch): + default: + return LocalizableError("invalid character %q", ch) + } + } + } + + // domain if enclosed in brackets, must match an IP address + if strings.HasPrefix(domain, "[") && strings.HasSuffix(domain, "]") { + domain = domain[1 : len(domain)-1] + if rem, ok := strings.CutPrefix(domain, "IPv6:"); ok { + if err := validateIPV6(rem); err != nil { + return LocalizableError("invalid ipv6 address: %v", err) + } + return nil + } + if err := validateIPV4(domain); err != nil { + return LocalizableError("invalid ipv4 address: %v", err) + } + return nil + } + + // domain must match the requirements for a hostname + if err := validateHostname(domain); err != nil { + return LocalizableError("invalid domain: %v", err) + } + + return nil +} + +// see see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +func validateDate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + _, err := time.Parse("2006-01-02", s) + return err +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +// NOTE: golang time package does not support leap seconds. 
+func validateTime(v any) error { + str, ok := v.(string) + if !ok { + return nil + } + + // min: hh:mm:ssZ + if len(str) < 9 { + return LocalizableError("less than 9 characters long") + } + if str[2] != ':' || str[5] != ':' { + return LocalizableError("missing colon in correct place") + } + + // parse hh:mm:ss + var hms []int + for _, tok := range strings.SplitN(str[:8], ":", 3) { + i, err := strconv.Atoi(tok) + if err != nil { + return LocalizableError("invalid hour/min/sec") + } + if i < 0 { + return LocalizableError("non-positive hour/min/sec") + } + hms = append(hms, i) + } + if len(hms) != 3 { + return LocalizableError("missing hour/min/sec") + } + h, m, s := hms[0], hms[1], hms[2] + if h > 23 || m > 59 || s > 60 { + return LocalizableError("hour/min/sec out of range") + } + str = str[8:] + + // parse sec-frac if present + if rem, ok := strings.CutPrefix(str, "."); ok { + numDigits := 0 + for _, ch := range rem { + if ch >= '0' && ch <= '9' { + numDigits++ + } else { + break + } + } + if numDigits == 0 { + return LocalizableError("no digits in second fraction") + } + str = rem[numDigits:] + } + + if str != "z" && str != "Z" { + // parse time-numoffset + if len(str) != 6 { + return LocalizableError("offset must be 6 characters long") + } + var sign int + switch str[0] { + case '+': + sign = -1 + case '-': + sign = +1 + default: + return LocalizableError("offset must begin with plus/minus") + } + str = str[1:] + if str[2] != ':' { + return LocalizableError("missing colon in offset in correct place") + } + + var zhm []int + for _, tok := range strings.SplitN(str, ":", 2) { + i, err := strconv.Atoi(tok) + if err != nil { + return LocalizableError("invalid hour/min in offset") + } + if i < 0 { + return LocalizableError("non-positive hour/min in offset") + } + zhm = append(zhm, i) + } + zh, zm := zhm[0], zhm[1] + if zh > 23 || zm > 59 { + return LocalizableError("hour/min in offset out of range") + } + + // apply timezone + hm := (h*60 + m) + sign*(zh*60+zm) + if 
hm < 0 { + hm += 24 * 60 + } + h, m = hm/60, hm%60 + } + + // check leap second + if s >= 60 && (h != 23 || m != 59) { + return LocalizableError("invalid leap second") + } + + return nil +} + +// see https://datatracker.ietf.org/doc/html/rfc3339#section-5.6 +func validateDateTime(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // min: yyyy-mm-ddThh:mm:ssZ + if len(s) < 20 { + return LocalizableError("less than 20 characters long") + } + + if s[10] != 't' && s[10] != 'T' { + return LocalizableError("11th character must be t or T") + } + if err := validateDate(s[:10]); err != nil { + return LocalizableError("invalid date element: %v", err) + } + if err := validateTime(s[11:]); err != nil { + return LocalizableError("invalid time element: %v", err) + } + return nil +} + +func parseURL(s string) (*gourl.URL, error) { + u, err := gourl.Parse(s) + if err != nil { + return nil, err + } + + // gourl does not validate ipv6 host address + hostName := u.Hostname() + if strings.Contains(hostName, ":") { + if !strings.Contains(u.Host, "[") || !strings.Contains(u.Host, "]") { + return nil, LocalizableError("ipv6 address not enclosed in brackets") + } + if err := validateIPV6(hostName); err != nil { + return nil, LocalizableError("invalid ipv6 address: %v", err) + } + } + + return u, nil +} + +func validateURI(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + u, err := parseURL(s) + if err != nil { + return err + } + if !u.IsAbs() { + return LocalizableError("relative url") + } + return nil +} + +func validateURIReference(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + if strings.Contains(s, `\`) { + return LocalizableError(`contains \`) + } + _, err := parseURL(s) + return err +} + +func validateURITemplate(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + u, err := parseURL(s) + if err != nil { + return err + } + for _, tok := range strings.Split(u.RawPath, "/") { + tok, err = decode(tok) + if err != nil 
{ + return LocalizableError("percent decode failed: %v", err) + } + want := true + for _, ch := range tok { + var got bool + switch ch { + case '{': + got = true + case '}': + got = false + default: + continue + } + if got != want { + return LocalizableError("nested curly braces") + } + want = !want + } + if !want { + return LocalizableError("no matching closing brace") + } + } + return nil +} + +func validatePeriod(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + slash := strings.IndexByte(s, '/') + if slash == -1 { + return LocalizableError("missing slash") + } + + start, end := s[:slash], s[slash+1:] + if strings.HasPrefix(start, "P") { + if err := validateDuration(start); err != nil { + return LocalizableError("invalid start duration: %v", err) + } + if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } else { + if err := validateDateTime(start); err != nil { + return LocalizableError("invalid start date-time: %v", err) + } + if strings.HasPrefix(end, "P") { + if err := validateDuration(end); err != nil { + return LocalizableError("invalid end duration: %v", err) + } + } else if err := validateDateTime(end); err != nil { + return LocalizableError("invalid end date-time: %v", err) + } + } + + return nil +} + +// see https://semver.org/#backusnaur-form-grammar-for-valid-semver-versions +func validateSemver(v any) error { + s, ok := v.(string) + if !ok { + return nil + } + + // build -- + if i := strings.IndexByte(s, '+'); i != -1 { + build := s[i+1:] + if build == "" { + return LocalizableError("build is empty") + } + for _, buildID := range strings.Split(build, ".") { + if buildID == "" { + return LocalizableError("build identifier is empty") + } + for _, ch := range buildID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + default: + return LocalizableError("invalid character %q in build identifier", ch) + } + } + 
} + s = s[:i] + } + + // pre-release -- + if i := strings.IndexByte(s, '-'); i != -1 { + preRelease := s[i+1:] + for _, preReleaseID := range strings.Split(preRelease, ".") { + if preReleaseID == "" { + return LocalizableError("pre-release identifier is empty") + } + allDigits := true + for _, ch := range preReleaseID { + switch { + case ch >= '0' && ch <= '9': + case (ch >= 'a' && ch <= 'z') || (ch >= 'A' && ch <= 'Z') || ch == '-': + allDigits = false + default: + return LocalizableError("invalid character %q in pre-release identifier", ch) + } + } + if allDigits && len(preReleaseID) > 1 && preReleaseID[0] == '0' { + return LocalizableError("pre-release numeric identifier starts with zero") + } + } + s = s[:i] + } + + // versionCore -- + versions := strings.Split(s, ".") + if len(versions) != 3 { + return LocalizableError("versionCore must have 3 numbers separated by dot") + } + names := []string{"major", "minor", "patch"} + for i, version := range versions { + if version == "" { + return LocalizableError("%s is empty", names[i]) + } + if len(version) > 1 && version[0] == '0' { + return LocalizableError("%s starts with zero", names[i]) + } + for _, ch := range version { + if ch < '0' || ch > '9' { + return LocalizableError("%s contains non-digit", names[i]) + } + } + } + + return nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work b/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work new file mode 100644 index 00000000..13df855d --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/go.work @@ -0,0 +1,8 @@ +go 1.21.1 + +use ( + . 
+ ./cmd/jv +) + +replace github.com/santhosh-tekuri/jsonschema/v6 v6.0.0 => ./ diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go new file mode 100644 index 00000000..7da112ac --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/kind/kind.go @@ -0,0 +1,651 @@ +package kind + +import ( + "fmt" + "math/big" + "strings" + + "golang.org/x/text/message" +) + +// -- + +type InvalidJsonValue struct { + Value any +} + +func (*InvalidJsonValue) KeywordPath() []string { + return nil +} + +func (k *InvalidJsonValue) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid jsonType %T", k.Value) +} + +// -- + +type Schema struct { + Location string +} + +func (*Schema) KeywordPath() []string { + return nil +} + +func (k *Schema) LocalizedString(p *message.Printer) string { + return p.Sprintf("jsonschema validation failed with %s", quote(k.Location)) +} + +// -- + +type Group struct{} + +func (*Group) KeywordPath() []string { + return nil +} + +func (*Group) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type Not struct{} + +func (*Not) KeywordPath() []string { + return nil +} + +func (*Not) LocalizedString(p *message.Printer) string { + return p.Sprintf("not failed") +} + +// -- + +type AllOf struct{} + +func (*AllOf) KeywordPath() []string { + return []string{"allOf"} +} + +func (*AllOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("allOf failed") +} + +// -- + +type AnyOf struct{} + +func (*AnyOf) KeywordPath() []string { + return []string{"anyOf"} +} + +func (*AnyOf) LocalizedString(p *message.Printer) string { + return p.Sprintf("anyOf failed") +} + +// -- + +type OneOf struct { + // Subschemas gives indexes of Subschemas that have matched. + // Value nil, means none of the subschemas matched. 
+ Subschemas []int +} + +func (*OneOf) KeywordPath() []string { + return []string{"oneOf"} +} + +func (k *OneOf) LocalizedString(p *message.Printer) string { + if len(k.Subschemas) == 0 { + return p.Sprintf("oneOf failed, none matched") + } + return p.Sprintf("oneOf failed, subschemas %d, %d matched", k.Subschemas[0], k.Subschemas[1]) +} + +//-- + +type FalseSchema struct{} + +func (*FalseSchema) KeywordPath() []string { + return nil +} + +func (*FalseSchema) LocalizedString(p *message.Printer) string { + return p.Sprintf("false schema") +} + +// -- + +type RefCycle struct { + URL string + KeywordLocation1 string + KeywordLocation2 string +} + +func (*RefCycle) KeywordPath() []string { + return nil +} + +func (k *RefCycle) LocalizedString(p *message.Printer) string { + return p.Sprintf("both %s and %s resolve to %q causing reference cycle", k.KeywordLocation1, k.KeywordLocation2, k.URL) +} + +// -- + +type Type struct { + Got string + Want []string +} + +func (*Type) KeywordPath() []string { + return []string{"type"} +} + +func (k *Type) LocalizedString(p *message.Printer) string { + want := strings.Join(k.Want, " or ") + return p.Sprintf("got %s, want %s", k.Got, want) +} + +// -- + +type Enum struct { + Got any + Want []any +} + +// KeywordPath implements jsonschema.ErrorKind. 
+func (*Enum) KeywordPath() []string { + return []string{"enum"} +} + +func (k *Enum) LocalizedString(p *message.Printer) string { + allPrimitive := true +loop: + for _, item := range k.Want { + switch item.(type) { + case []any, map[string]any: + allPrimitive = false + break loop + } + } + if allPrimitive { + if len(k.Want) == 1 { + return p.Sprintf("value must be %s", display(k.Want[0])) + } + var want []string + for _, v := range k.Want { + want = append(want, display(v)) + } + return p.Sprintf("value must be one of %s", strings.Join(want, ", ")) + } + return p.Sprintf("enum failed") +} + +// -- + +type Const struct { + Got any + Want any +} + +func (*Const) KeywordPath() []string { + return []string{"const"} +} + +func (k *Const) LocalizedString(p *message.Printer) string { + switch want := k.Want.(type) { + case []any, map[string]any: + return p.Sprintf("const failed") + default: + return p.Sprintf("value must be %s", display(want)) + } +} + +// -- + +type Format struct { + Got any + Want string + Err error +} + +func (*Format) KeywordPath() []string { + return []string{"format"} +} + +func (k *Format) LocalizedString(p *message.Printer) string { + return p.Sprintf("%s is not valid %s: %v", display(k.Got), k.Want, localizedError(k.Err, p)) +} + +// -- + +type Reference struct { + Keyword string + URL string +} + +func (k *Reference) KeywordPath() []string { + return []string{k.Keyword} +} + +func (*Reference) LocalizedString(p *message.Printer) string { + return p.Sprintf("validation failed") +} + +// -- + +type MinProperties struct { + Got, Want int +} + +func (*MinProperties) KeywordPath() []string { + return []string{"minProperties"} +} + +func (k *MinProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("minProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxProperties struct { + Got, Want int +} + +func (*MaxProperties) KeywordPath() []string { + return []string{"maxProperties"} +} + +func (k *MaxProperties) 
LocalizedString(p *message.Printer) string { + return p.Sprintf("maxProperties: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MinItems struct { + Got, Want int +} + +func (*MinItems) KeywordPath() []string { + return []string{"minItems"} +} + +func (k *MinItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("minItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxItems struct { + Got, Want int +} + +func (*MaxItems) KeywordPath() []string { + return []string{"maxItems"} +} + +func (k *MaxItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxItems: got %d, want %d", k.Got, k.Want) +} + +// -- + +type AdditionalItems struct { + Count int +} + +func (*AdditionalItems) KeywordPath() []string { + return []string{"additionalItems"} +} + +func (k *AdditionalItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("last %d additionalItem(s) not allowed", k.Count) +} + +// -- + +type Required struct { + Missing []string +} + +func (*Required) KeywordPath() []string { + return []string{"required"} +} + +func (k *Required) LocalizedString(p *message.Printer) string { + if len(k.Missing) == 1 { + return p.Sprintf("missing property %s", quote(k.Missing[0])) + } + return p.Sprintf("missing properties %s", joinQuoted(k.Missing, ", ")) +} + +// -- + +type Dependency struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *Dependency) KeywordPath() []string { + return []string{"dependency", k.Prop} +} + +func (k *Dependency) LocalizedString(p *message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type DependentRequired struct { + Prop string // dependency of prop that failed + Missing []string // missing props +} + +func (k *DependentRequired) KeywordPath() []string { + return []string{"dependentRequired", k.Prop} +} + +func (k *DependentRequired) LocalizedString(p 
*message.Printer) string { + return p.Sprintf("properties %s required, if %s exists", joinQuoted(k.Missing, ", "), quote(k.Prop)) +} + +// -- + +type AdditionalProperties struct { + Properties []string +} + +func (*AdditionalProperties) KeywordPath() []string { + return []string{"additionalProperties"} +} + +func (k *AdditionalProperties) LocalizedString(p *message.Printer) string { + return p.Sprintf("additional properties %s not allowed", joinQuoted(k.Properties, ", ")) +} + +// -- + +type PropertyNames struct { + Property string +} + +func (*PropertyNames) KeywordPath() []string { + return []string{"propertyNames"} +} + +func (k *PropertyNames) LocalizedString(p *message.Printer) string { + return p.Sprintf("invalid propertyName %s", quote(k.Property)) +} + +// -- + +type UniqueItems struct { + Duplicates [2]int +} + +func (*UniqueItems) KeywordPath() []string { + return []string{"uniqueItems"} +} + +func (k *UniqueItems) LocalizedString(p *message.Printer) string { + return p.Sprintf("items at %d and %d are equal", k.Duplicates[0], k.Duplicates[1]) +} + +// -- + +type Contains struct{} + +func (*Contains) KeywordPath() []string { + return []string{"contains"} +} + +func (*Contains) LocalizedString(p *message.Printer) string { + return p.Sprintf("no items match contains schema") +} + +// -- + +type MinContains struct { + Got []int + Want int +} + +func (*MinContains) KeywordPath() []string { + return []string{"minContains"} +} + +func (k *MinContains) LocalizedString(p *message.Printer) string { + if len(k.Got) == 0 { + return p.Sprintf("min %d items required to match contains schema, but none matched", k.Want) + } else { + got := fmt.Sprintf("%v", k.Got) + return p.Sprintf("min %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1]) + } +} + +// -- + +type MaxContains struct { + Got []int + Want int +} + +func (*MaxContains) KeywordPath() []string { + return []string{"maxContains"} +} + +func (k 
*MaxContains) LocalizedString(p *message.Printer) string { + got := fmt.Sprintf("%v", k.Got) + return p.Sprintf("max %d items required to match contains schema, but matched %d items at %v", k.Want, len(k.Got), got[1:len(got)-1]) +} + +// -- + +type MinLength struct { + Got, Want int +} + +func (*MinLength) KeywordPath() []string { + return []string{"minLength"} +} + +func (k *MinLength) LocalizedString(p *message.Printer) string { + return p.Sprintf("minLength: got %d, want %d", k.Got, k.Want) +} + +// -- + +type MaxLength struct { + Got, Want int +} + +func (*MaxLength) KeywordPath() []string { + return []string{"maxLength"} +} + +func (k *MaxLength) LocalizedString(p *message.Printer) string { + return p.Sprintf("maxLength: got %d, want %d", k.Got, k.Want) +} + +// -- + +type Pattern struct { + Got string + Want string +} + +func (*Pattern) KeywordPath() []string { + return []string{"pattern"} +} + +func (k *Pattern) LocalizedString(p *message.Printer) string { + return p.Sprintf("%s does not match pattern %s", quote(k.Got), quote(k.Want)) +} + +// -- + +type ContentEncoding struct { + Want string + Err error +} + +func (*ContentEncoding) KeywordPath() []string { + return []string{"contentEncoding"} +} + +func (k *ContentEncoding) LocalizedString(p *message.Printer) string { + return p.Sprintf("value is not %s encoded: %v", quote(k.Want), localizedError(k.Err, p)) +} + +// -- + +type ContentMediaType struct { + Got []byte + Want string + Err error +} + +func (*ContentMediaType) KeywordPath() []string { + return []string{"contentMediaType"} +} + +func (k *ContentMediaType) LocalizedString(p *message.Printer) string { + return p.Sprintf("value if not of mediatype %s: %v", quote(k.Want), k.Err) +} + +// -- + +type ContentSchema struct{} + +func (*ContentSchema) KeywordPath() []string { + return []string{"contentSchema"} +} + +func (*ContentSchema) LocalizedString(p *message.Printer) string { + return p.Sprintf("contentSchema failed") +} + +// -- + +type Minimum 
struct { + Got *big.Rat + Want *big.Rat +} + +func (*Minimum) KeywordPath() []string { + return []string{"minimum"} +} + +func (k *Minimum) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("minimum: got %v, want %v", got, want) +} + +// -- + +type Maximum struct { + Got *big.Rat + Want *big.Rat +} + +func (*Maximum) KeywordPath() []string { + return []string{"maximum"} +} + +func (k *Maximum) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("maximum: got %v, want %v", got, want) +} + +// -- + +type ExclusiveMinimum struct { + Got *big.Rat + Want *big.Rat +} + +func (*ExclusiveMinimum) KeywordPath() []string { + return []string{"exclusiveMinimum"} +} + +func (k *ExclusiveMinimum) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("exclusiveMinimum: got %v, want %v", got, want) +} + +// -- + +type ExclusiveMaximum struct { + Got *big.Rat + Want *big.Rat +} + +func (*ExclusiveMaximum) KeywordPath() []string { + return []string{"exclusiveMaximum"} +} + +func (k *ExclusiveMaximum) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("exclusiveMaximum: got %v, want %v", got, want) +} + +// -- + +type MultipleOf struct { + Got *big.Rat + Want *big.Rat +} + +func (*MultipleOf) KeywordPath() []string { + return []string{"multipleOf"} +} + +func (k *MultipleOf) LocalizedString(p *message.Printer) string { + got, _ := k.Got.Float64() + want, _ := k.Want.Float64() + return p.Sprintf("multipleOf: got %v, want %v", got, want) +} + +// -- + +func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} + +func joinQuoted(arr []string, sep string) string { + var sb strings.Builder + for _, s 
:= range arr { + if sb.Len() > 0 { + sb.WriteString(sep) + } + sb.WriteString(quote(s)) + } + return sb.String() +} + +// to be used only for primitive. +func display(v any) string { + switch v := v.(type) { + case string: + return quote(v) + case []any, map[string]any: + return "value" + default: + return fmt.Sprintf("%v", v) + } +} + +func localizedError(err error, p *message.Printer) string { + if err, ok := err.(interface{ LocalizedError(*message.Printer) string }); ok { + return err.LocalizedError(p) + } + return err.Error() +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go new file mode 100644 index 00000000..ce0170e2 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/loader.go @@ -0,0 +1,266 @@ +package jsonschema + +import ( + "embed" + "encoding/json" + "errors" + "fmt" + "io" + "io/fs" + gourl "net/url" + "os" + "path/filepath" + "runtime" + "strings" +) + +// URLLoader knows how to load json from given url. +type URLLoader interface { + // Load loads json from given absolute url. + Load(url string) (any, error) +} + +// -- + +// FileLoader loads json file url. +type FileLoader struct{} + +func (l FileLoader) Load(url string) (any, error) { + path, err := l.ToFile(url) + if err != nil { + return nil, err + } + f, err := os.Open(path) + if err != nil { + return nil, err + } + defer f.Close() + return UnmarshalJSON(f) +} + +// ToFile is helper method to convert file url to file path. +func (l FileLoader) ToFile(url string) (string, error) { + u, err := gourl.Parse(url) + if err != nil { + return "", err + } + if u.Scheme != "file" { + return "", fmt.Errorf("invalid file url: %s", u) + } + path := u.Path + if runtime.GOOS == "windows" { + path = strings.TrimPrefix(path, "/") + path = filepath.FromSlash(path) + } + return path, nil +} + +// -- + +// SchemeURLLoader delegates to other [URLLoaders] +// based on url scheme. 
+type SchemeURLLoader map[string]URLLoader + +func (l SchemeURLLoader) Load(url string) (any, error) { + u, err := gourl.Parse(url) + if err != nil { + return nil, err + } + ll, ok := l[u.Scheme] + if !ok { + return nil, &UnsupportedURLSchemeError{u.String()} + } + return ll.Load(url) +} + +// -- + +//go:embed metaschemas +var metaFS embed.FS + +func openMeta(url string) (fs.File, error) { + u, meta := strings.CutPrefix(url, "http://json-schema.org/") + if !meta { + u, meta = strings.CutPrefix(url, "https://json-schema.org/") + } + if meta { + if u == "schema" { + return openMeta(draftLatest.url) + } + f, err := metaFS.Open("metaschemas/" + u) + if err != nil { + if errors.Is(err, fs.ErrNotExist) { + return nil, nil + } + return nil, err + } + return f, err + } + return nil, nil + +} + +func isMeta(url string) bool { + f, err := openMeta(url) + if err != nil { + return true + } + if f != nil { + f.Close() + return true + } + return false +} + +func loadMeta(url string) (any, error) { + f, err := openMeta(url) + if err != nil { + return nil, err + } + if f == nil { + return nil, nil + } + defer f.Close() + return UnmarshalJSON(f) +} + +// -- + +type defaultLoader struct { + docs map[url]any // docs loaded so far + loader URLLoader +} + +func (l *defaultLoader) add(url url, doc any) bool { + if _, ok := l.docs[url]; ok { + return false + } + l.docs[url] = doc + return true +} + +func (l *defaultLoader) load(url url) (any, error) { + if doc, ok := l.docs[url]; ok { + return doc, nil + } + doc, err := loadMeta(url.String()) + if err != nil { + return nil, err + } + if doc != nil { + l.add(url, doc) + return doc, nil + } + if l.loader == nil { + return nil, &LoadURLError{url.String(), errors.New("no URLLoader set")} + } + doc, err = l.loader.Load(url.String()) + if err != nil { + return nil, &LoadURLError{URL: url.String(), Err: err} + } + l.add(url, doc) + return doc, nil +} + +func (l *defaultLoader) getDraft(up urlPtr, doc any, defaultDraft *Draft, cycle 
map[url]struct{}) (*Draft, error) { + obj, ok := doc.(map[string]any) + if !ok { + return defaultDraft, nil + } + sch, ok := strVal(obj, "$schema") + if !ok { + return defaultDraft, nil + } + if draft := draftFromURL(sch); draft != nil { + return draft, nil + } + sch, _ = split(sch) + if _, err := gourl.Parse(sch); err != nil { + return nil, &InvalidMetaSchemaURLError{up.String(), err} + } + schUrl := url(sch) + if up.ptr.isEmpty() && schUrl == up.url { + return nil, &UnsupportedDraftError{schUrl.String()} + } + if _, ok := cycle[schUrl]; ok { + return nil, &MetaSchemaCycleError{schUrl.String()} + } + cycle[schUrl] = struct{}{} + doc, err := l.load(schUrl) + if err != nil { + return nil, err + } + return l.getDraft(urlPtr{schUrl, ""}, doc, defaultDraft, cycle) +} + +func (l *defaultLoader) getMetaVocabs(doc any, draft *Draft, vocabularies map[string]*Vocabulary) ([]string, error) { + obj, ok := doc.(map[string]any) + if !ok { + return nil, nil + } + sch, ok := strVal(obj, "$schema") + if !ok { + return nil, nil + } + if draft := draftFromURL(sch); draft != nil { + return nil, nil + } + sch, _ = split(sch) + if _, err := gourl.Parse(sch); err != nil { + return nil, &ParseURLError{sch, err} + } + schUrl := url(sch) + doc, err := l.load(schUrl) + if err != nil { + return nil, err + } + return draft.getVocabs(schUrl, doc, vocabularies) +} + +// -- + +type LoadURLError struct { + URL string + Err error +} + +func (e *LoadURLError) Error() string { + return fmt.Sprintf("failing loading %q: %v", e.URL, e.Err) +} + +// -- + +type UnsupportedURLSchemeError struct { + url string +} + +func (e *UnsupportedURLSchemeError) Error() string { + return fmt.Sprintf("no URLLoader registered for %q", e.url) +} + +// -- + +type ResourceExistsError struct { + url string +} + +func (e *ResourceExistsError) Error() string { + return fmt.Sprintf("resource for %q already exists", e.url) +} + +// -- + +// UnmarshalJSON unmarshals into [any] without losing +// number precision using 
[json.Number]. +func UnmarshalJSON(r io.Reader) (any, error) { + decoder := json.NewDecoder(r) + decoder.UseNumber() + var doc any + if err := decoder.Decode(&doc); err != nil { + return nil, err + } + if _, err := decoder.Token(); err == nil || err != io.EOF { + return nil, fmt.Errorf("invalid character after top-level value") + } + return doc, nil +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema new file mode 100644 index 00000000..b2a7ff0f --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-04/schema @@ -0,0 +1,151 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "description": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "positiveInteger": { + "type": "integer", + "minimum": 0 + }, + "positiveIntegerDefault0": { + "allOf": [ { "$ref": "#/definitions/positiveInteger" }, { "default": 0 } ] + }, + "simpleTypes": { + "enum": [ "array", "boolean", "integer", "null", "number", "object", "string" ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "minItems": 1, + "uniqueItems": true + } + }, + "type": "object", + "properties": { + "id": { + "type": "string", + "format": "uriref" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "minimum": 0, + "exclusiveMinimum": true + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "boolean", + "default": false + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "boolean", + "default": false + }, + "maxLength": { "$ref": "#/definitions/positiveInteger" }, + "minLength": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "pattern": { + "type": 
"string", + "format": "regex" + }, + "additionalItems": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/positiveInteger" }, + "minItems": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxProperties": { "$ref": "#/definitions/positiveInteger" }, + "minProperties": { "$ref": "#/definitions/positiveIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { + "anyOf": [ + { "type": "boolean" }, + { "$ref": "#" } + ], + "default": {} + }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" }, + "format": { "type": "string" }, + "$ref": { "type": "string" } + }, + "dependencies": { + "exclusiveMaximum": [ "maximum" ], + "exclusiveMinimum": [ "minimum" ] + }, + "default": {} +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema new file mode 100644 index 00000000..fa22ad1b --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-06/schema @@ -0,0 +1,150 @@ +{ + "$schema": "http://json-schema.org/draft-06/schema#", + "$id": "http://json-schema.org/draft-06/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": {}, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": {} + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + 
"type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": {}, + "enum": { + "type": "array", + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": {} +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema new file mode 100644 index 00000000..326759a6 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft-07/schema @@ -0,0 +1,172 @@ +{ + "$schema": "http://json-schema.org/draft-07/schema#", + "$id": "http://json-schema.org/draft-07/schema#", + "title": "Core schema meta-schema", + "definitions": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$ref": "#" } + }, + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + 
"nonNegativeIntegerDefault0": { + "allOf": [ + { "$ref": "#/definitions/nonNegativeInteger" }, + { "default": 0 } + ] + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + }, + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$comment": { + "type": "string" + }, + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/definitions/nonNegativeInteger" }, + "minLength": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "additionalItems": { "$ref": "#" }, + "items": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/schemaArray" } + ], + "default": true + }, + "maxItems": { "$ref": "#/definitions/nonNegativeInteger" }, + "minItems": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "contains": { "$ref": "#" }, + "maxProperties": { "$ref": "#/definitions/nonNegativeInteger" }, + "minProperties": { "$ref": "#/definitions/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/definitions/stringArray" }, + "additionalProperties": { "$ref": "#" }, + "definitions": { + 
"type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "properties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$ref": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependencies": { + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$ref": "#" }, + { "$ref": "#/definitions/stringArray" } + ] + } + }, + "propertyNames": { "$ref": "#" }, + "const": true, + "enum": { + "type": "array", + "items": true, + "minItems": 1, + "uniqueItems": true + }, + "type": { + "anyOf": [ + { "$ref": "#/definitions/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/definitions/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "format": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "if": { "$ref": "#" }, + "then": { "$ref": "#" }, + "else": { "$ref": "#" }, + "allOf": { "$ref": "#/definitions/schemaArray" }, + "anyOf": { "$ref": "#/definitions/schemaArray" }, + "oneOf": { "$ref": "#/definitions/schemaArray" }, + "not": { "$ref": "#" } + }, + "default": true +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator new file mode 100644 index 00000000..857d2d49 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/applicator @@ -0,0 +1,55 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/applicator": true + }, + "$recursiveAnchor": true, + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "additionalItems": { "$recursiveRef": "#" }, + 
"unevaluatedItems": { "$recursiveRef": "#" }, + "items": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "#/$defs/schemaArray" } + ] + }, + "contains": { "$recursiveRef": "#" }, + "additionalProperties": { "$recursiveRef": "#" }, + "unevaluatedProperties": { "$recursiveRef": "#" }, + "properties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { + "$recursiveRef": "#" + } + }, + "propertyNames": { "$recursiveRef": "#" }, + "if": { "$recursiveRef": "#" }, + "then": { "$recursiveRef": "#" }, + "else": { "$recursiveRef": "#" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$recursiveRef": "#" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$recursiveRef": "#" } + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content new file mode 100644 index 00000000..fa5d20b8 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": "Content vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "contentMediaType": { "type": "string" }, + "contentEncoding": { "type": "string" }, + "contentSchema": { "$recursiveRef": "#" } + } +} diff --git 
a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core new file mode 100644 index 00000000..bf573198 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/core @@ -0,0 +1,56 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true + }, + "$recursiveAnchor": true, + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "type": "string", + "format": "uri-reference", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { + "type": "string", + "format": "uri" + }, + "$anchor": { + "type": "string", + "pattern": "^[A-Za-z][-A-Za-z0-9.:_]*$" + }, + "$ref": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveRef": { + "type": "string", + "format": "uri-reference" + }, + "$recursiveAnchor": { + "type": "boolean", + "default": false + }, + "$vocabulary": { + "type": "object", + "propertyNames": { + "type": "string", + "format": "uri" + }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format new file mode 100644 index 00000000..fe553c23 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/format @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/format", + "$vocabulary": { + 
"https://json-schema.org/draft/2019-09/vocab/format": true + }, + "$recursiveAnchor": true, + "title": "Format vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data new file mode 100644 index 00000000..5c95715c --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/meta-data @@ -0,0 +1,35 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/meta-data", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/meta-data": true + }, + "$recursiveAnchor": true, + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation new file mode 100644 index 00000000..f3525e07 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/validation": true + }, + "$recursiveAnchor": true, + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + 
"multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema new file mode 100644 index 00000000..f433389b --- /dev/null +++ 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2019-09/schema @@ -0,0 +1,41 @@ +{ + "$schema": "https://json-schema.org/draft/2019-09/schema", + "$id": "https://json-schema.org/draft/2019-09/schema", + "$vocabulary": { + "https://json-schema.org/draft/2019-09/vocab/core": true, + "https://json-schema.org/draft/2019-09/vocab/applicator": true, + "https://json-schema.org/draft/2019-09/vocab/validation": true, + "https://json-schema.org/draft/2019-09/vocab/meta-data": true, + "https://json-schema.org/draft/2019-09/vocab/format": false, + "https://json-schema.org/draft/2019-09/vocab/content": true + }, + "$recursiveAnchor": true, + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "properties": { + "definitions": { + "$comment": "While no longer an official keyword as it is replaced by $defs, this keyword is retained in the meta-schema to prevent incompatible extensions as it remains in common use.", + "type": "object", + "additionalProperties": { "$recursiveRef": "#" }, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" is no longer a keyword, but schema authors should avoid redefining it to facilitate a smooth transition to \"dependentSchemas\" and \"dependentRequired\"", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$recursiveRef": "#" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + } + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator new file mode 100644 index 00000000..0ef24edc --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/applicator @@ -0,0 +1,47 @@ +{ + 
"$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/applicator", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/applicator": true + }, + "$dynamicAnchor": "meta", + "title": "Applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "prefixItems": { "$ref": "#/$defs/schemaArray" }, + "items": { "$dynamicRef": "#meta" }, + "contains": { "$dynamicRef": "#meta" }, + "additionalProperties": { "$dynamicRef": "#meta" }, + "properties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "patternProperties": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "propertyNames": { "format": "regex" }, + "default": {} + }, + "dependentSchemas": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "default": {} + }, + "propertyNames": { "$dynamicRef": "#meta" }, + "if": { "$dynamicRef": "#meta" }, + "then": { "$dynamicRef": "#meta" }, + "else": { "$dynamicRef": "#meta" }, + "allOf": { "$ref": "#/$defs/schemaArray" }, + "anyOf": { "$ref": "#/$defs/schemaArray" }, + "oneOf": { "$ref": "#/$defs/schemaArray" }, + "not": { "$dynamicRef": "#meta" } + }, + "$defs": { + "schemaArray": { + "type": "array", + "minItems": 1, + "items": { "$dynamicRef": "#meta" } + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content new file mode 100644 index 00000000..0330ff0a --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/content @@ -0,0 +1,15 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/content", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Content 
vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "contentEncoding": { "type": "string" }, + "contentMediaType": { "type": "string" }, + "contentSchema": { "$dynamicRef": "#meta" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core new file mode 100644 index 00000000..c4de7005 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/core @@ -0,0 +1,50 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/core", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true + }, + "$dynamicAnchor": "meta", + "title": "Core vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "$id": { + "$ref": "#/$defs/uriReferenceString", + "$comment": "Non-empty fragments not allowed.", + "pattern": "^[^#]*#?$" + }, + "$schema": { "$ref": "#/$defs/uriString" }, + "$ref": { "$ref": "#/$defs/uriReferenceString" }, + "$anchor": { "$ref": "#/$defs/anchorString" }, + "$dynamicRef": { "$ref": "#/$defs/uriReferenceString" }, + "$dynamicAnchor": { "$ref": "#/$defs/anchorString" }, + "$vocabulary": { + "type": "object", + "propertyNames": { "$ref": "#/$defs/uriString" }, + "additionalProperties": { + "type": "boolean" + } + }, + "$comment": { + "type": "string" + }, + "$defs": { + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" } + } + }, + "$defs": { + "anchorString": { + "type": "string", + "pattern": "^[A-Za-z_][-A-Za-z0-9._]*$" + }, + "uriString": { + "type": "string", + "format": "uri" + }, + "uriReferenceString": { + "type": "string", + "format": "uri-reference" + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation new file mode 100644 index 00000000..0aa07d1c --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-annotation @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-annotation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for annotation results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion new file mode 100644 index 00000000..38613bff --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/format-assertion @@ -0,0 +1,13 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/format-assertion", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/format-assertion": true + }, + "$dynamicAnchor": "meta", + "title": "Format vocabulary meta-schema for assertion results", + "type": ["object", "boolean"], + "properties": { + "format": { "type": "string" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data new file mode 100644 index 00000000..30e28371 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/meta-data @@ -0,0 +1,35 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/meta-data", + 
"$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/meta-data": true + }, + "$dynamicAnchor": "meta", + "title": "Meta-data vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "title": { + "type": "string" + }, + "description": { + "type": "string" + }, + "default": true, + "deprecated": { + "type": "boolean", + "default": false + }, + "readOnly": { + "type": "boolean", + "default": false + }, + "writeOnly": { + "type": "boolean", + "default": false + }, + "examples": { + "type": "array", + "items": true + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated new file mode 100644 index 00000000..e9e093d1 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/unevaluated @@ -0,0 +1,14 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/unevaluated", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true + }, + "$dynamicAnchor": "meta", + "title": "Unevaluated applicator vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "unevaluatedItems": { "$dynamicRef": "#meta" }, + "unevaluatedProperties": { "$dynamicRef": "#meta" } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation new file mode 100644 index 00000000..4e016ed2 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/meta/validation @@ -0,0 +1,97 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/meta/validation", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/validation": true + }, + 
"$dynamicAnchor": "meta", + "title": "Validation vocabulary meta-schema", + "type": ["object", "boolean"], + "properties": { + "type": { + "anyOf": [ + { "$ref": "#/$defs/simpleTypes" }, + { + "type": "array", + "items": { "$ref": "#/$defs/simpleTypes" }, + "minItems": 1, + "uniqueItems": true + } + ] + }, + "const": true, + "enum": { + "type": "array", + "items": true + }, + "multipleOf": { + "type": "number", + "exclusiveMinimum": 0 + }, + "maximum": { + "type": "number" + }, + "exclusiveMaximum": { + "type": "number" + }, + "minimum": { + "type": "number" + }, + "exclusiveMinimum": { + "type": "number" + }, + "maxLength": { "$ref": "#/$defs/nonNegativeInteger" }, + "minLength": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "pattern": { + "type": "string", + "format": "regex" + }, + "maxItems": { "$ref": "#/$defs/nonNegativeInteger" }, + "minItems": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "uniqueItems": { + "type": "boolean", + "default": false + }, + "maxContains": { "$ref": "#/$defs/nonNegativeInteger" }, + "minContains": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 1 + }, + "maxProperties": { "$ref": "#/$defs/nonNegativeInteger" }, + "minProperties": { "$ref": "#/$defs/nonNegativeIntegerDefault0" }, + "required": { "$ref": "#/$defs/stringArray" }, + "dependentRequired": { + "type": "object", + "additionalProperties": { + "$ref": "#/$defs/stringArray" + } + } + }, + "$defs": { + "nonNegativeInteger": { + "type": "integer", + "minimum": 0 + }, + "nonNegativeIntegerDefault0": { + "$ref": "#/$defs/nonNegativeInteger", + "default": 0 + }, + "simpleTypes": { + "enum": [ + "array", + "boolean", + "integer", + "null", + "number", + "object", + "string" + ] + }, + "stringArray": { + "type": "array", + "items": { "type": "string" }, + "uniqueItems": true, + "default": [] + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema 
b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema new file mode 100644 index 00000000..364f8ada --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/metaschemas/draft/2020-12/schema @@ -0,0 +1,57 @@ +{ + "$schema": "https://json-schema.org/draft/2020-12/schema", + "$id": "https://json-schema.org/draft/2020-12/schema", + "$vocabulary": { + "https://json-schema.org/draft/2020-12/vocab/core": true, + "https://json-schema.org/draft/2020-12/vocab/applicator": true, + "https://json-schema.org/draft/2020-12/vocab/unevaluated": true, + "https://json-schema.org/draft/2020-12/vocab/validation": true, + "https://json-schema.org/draft/2020-12/vocab/meta-data": true, + "https://json-schema.org/draft/2020-12/vocab/format-annotation": true, + "https://json-schema.org/draft/2020-12/vocab/content": true + }, + "$dynamicAnchor": "meta", + "title": "Core and Validation specifications meta-schema", + "allOf": [ + {"$ref": "meta/core"}, + {"$ref": "meta/applicator"}, + {"$ref": "meta/unevaluated"}, + {"$ref": "meta/validation"}, + {"$ref": "meta/meta-data"}, + {"$ref": "meta/format-annotation"}, + {"$ref": "meta/content"} + ], + "type": ["object", "boolean"], + "$comment": "This meta-schema also defines keywords that have appeared in previous drafts in order to prevent incompatible extensions as they remain in common use.", + "properties": { + "definitions": { + "$comment": "\"definitions\" has been replaced by \"$defs\".", + "type": "object", + "additionalProperties": { "$dynamicRef": "#meta" }, + "deprecated": true, + "default": {} + }, + "dependencies": { + "$comment": "\"dependencies\" has been split and replaced by \"dependentSchemas\" and \"dependentRequired\" in order to serve their differing semantics.", + "type": "object", + "additionalProperties": { + "anyOf": [ + { "$dynamicRef": "#meta" }, + { "$ref": "meta/validation#/$defs/stringArray" } + ] + }, + "deprecated": true, + "default": {} + }, + "$recursiveAnchor": { + 
"$comment": "\"$recursiveAnchor\" has been replaced by \"$dynamicAnchor\".", + "$ref": "meta/core#/$defs/anchorString", + "deprecated": true + }, + "$recursiveRef": { + "$comment": "\"$recursiveRef\" has been replaced by \"$dynamicRef\".", + "$ref": "meta/core#/$defs/uriReferenceString", + "deprecated": true + } + } +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go new file mode 100644 index 00000000..f1494b13 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/objcompiler.go @@ -0,0 +1,549 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" + "strconv" +) + +type objCompiler struct { + c *Compiler + obj map[string]any + up urlPtr + r *root + res *resource + q *queue +} + +func (c *objCompiler) compile(s *Schema) error { + // id -- + if id := c.res.dialect.draft.getID(c.obj); id != "" { + s.ID = id + } + + // anchor -- + if s.DraftVersion < 2019 { + // anchor is specified in id + id := c.string(c.res.dialect.draft.id) + if id != "" { + _, f := split(id) + if f != "" { + var err error + s.Anchor, err = decode(f) + if err != nil { + return &ParseAnchorError{URL: s.Location} + } + } + } + } else { + s.Anchor = c.string("$anchor") + } + + if err := c.compileDraft4(s); err != nil { + return err + } + if s.DraftVersion >= 6 { + if err := c.compileDraft6(s); err != nil { + return err + } + } + if s.DraftVersion >= 7 { + if err := c.compileDraft7(s); err != nil { + return err + } + } + if s.DraftVersion >= 2019 { + if err := c.compileDraft2019(s); err != nil { + return err + } + } + if s.DraftVersion >= 2020 { + if err := c.compileDraft2020(s); err != nil { + return err + } + } + + // vocabularies + vocabs := c.res.dialect.activeVocabs(c.c.roots.assertVocabs, c.c.roots.vocabularies) + for _, vocab := range vocabs { + v := c.c.roots.vocabularies[vocab] + if v == nil { + continue + } + ext, err := v.Compile(&CompilerContext{c}, c.obj) + if err 
!= nil { + return err + } + if ext != nil { + s.Extensions = append(s.Extensions, ext) + } + } + + return nil +} + +func (c *objCompiler) compileDraft4(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.Ref, err = c.enqueueRef("$ref"); err != nil { + return err + } + if s.DraftVersion < 2019 && s.Ref != nil { + // All other properties in a "$ref" object MUST be ignored + return nil + } + } + + if c.hasVocab("applicator") { + s.AllOf = c.enqueueArr("allOf") + s.AnyOf = c.enqueueArr("anyOf") + s.OneOf = c.enqueueArr("oneOf") + s.Not = c.enqueueProp("not") + + if s.DraftVersion < 2020 { + if items, ok := c.obj["items"]; ok { + if _, ok := items.([]any); ok { + s.Items = c.enqueueArr("items") + s.AdditionalItems = c.enqueueAdditional("additionalItems") + } else { + s.Items = c.enqueueProp("items") + } + } + } + + s.Properties = c.enqueueMap("properties") + if m := c.enqueueMap("patternProperties"); m != nil { + s.PatternProperties = map[Regexp]*Schema{} + for pname, sch := range m { + re, err := c.c.roots.regexpEngine(pname) + if err != nil { + return &InvalidRegexError{c.up.format("patternProperties"), pname, err} + } + s.PatternProperties[re] = sch + } + } + s.AdditionalProperties = c.enqueueAdditional("additionalProperties") + + if m := c.objVal("dependencies"); m != nil { + s.Dependencies = map[string]any{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.Dependencies[pname] = toStrings(arr) + } else { + ptr := c.up.ptr.append2("dependencies", pname) + s.Dependencies[pname] = c.enqueuePtr(ptr) + } + } + } + } + + if c.hasVocab("validation") { + if t, ok := c.obj["type"]; ok { + s.Types = newTypes(t) + } + if arr := c.arrVal("enum"); arr != nil { + s.Enum = newEnum(arr) + } + s.MultipleOf = c.numVal("multipleOf") + s.Maximum = c.numVal("maximum") + if c.boolean("exclusiveMaximum") { + s.ExclusiveMaximum = s.Maximum + s.Maximum = nil + } else { + s.ExclusiveMaximum = c.numVal("exclusiveMaximum") + } + s.Minimum = 
c.numVal("minimum") + if c.boolean("exclusiveMinimum") { + s.ExclusiveMinimum = s.Minimum + s.Minimum = nil + } else { + s.ExclusiveMinimum = c.numVal("exclusiveMinimum") + } + + s.MinLength = c.intVal("minLength") + s.MaxLength = c.intVal("maxLength") + if pat := c.strVal("pattern"); pat != nil { + s.Pattern, err = c.c.roots.regexpEngine(*pat) + if err != nil { + return &InvalidRegexError{c.up.format("pattern"), *pat, err} + } + } + + s.MinItems = c.intVal("minItems") + s.MaxItems = c.intVal("maxItems") + s.UniqueItems = c.boolean("uniqueItems") + + s.MaxProperties = c.intVal("maxProperties") + s.MinProperties = c.intVal("minProperties") + if arr := c.arrVal("required"); arr != nil { + s.Required = toStrings(arr) + } + } + + // format -- + if c.assertFormat(s.DraftVersion) { + if f := c.strVal("format"); f != nil { + if *f == "regex" { + s.Format = &Format{ + Name: "regex", + Validate: c.c.roots.regexpEngine.validate, + } + } else { + s.Format = c.c.formats[*f] + if s.Format == nil { + s.Format = formats[*f] + } + } + } + } + + // annotations -- + s.Title = c.string("title") + s.Description = c.string("description") + if v, ok := c.obj["default"]; ok { + s.Default = &v + } + + return nil +} + +func (c *objCompiler) compileDraft6(s *Schema) error { + if c.hasVocab("applicator") { + s.Contains = c.enqueueProp("contains") + s.PropertyNames = c.enqueueProp("propertyNames") + } + if c.hasVocab("validation") { + if v, ok := c.obj["const"]; ok { + s.Const = &v + } + } + return nil +} + +func (c *objCompiler) compileDraft7(s *Schema) error { + if c.hasVocab("applicator") { + s.If = c.enqueueProp("if") + if s.If != nil { + b := c.boolVal("if") + if b == nil || *b { + s.Then = c.enqueueProp("then") + } + if b == nil || !*b { + s.Else = c.enqueueProp("else") + } + } + } + + if c.c.assertContent { + if ce := c.strVal("contentEncoding"); ce != nil { + s.ContentEncoding = c.c.decoders[*ce] + if s.ContentEncoding == nil { + s.ContentEncoding = decoders[*ce] + } + } + if cm := 
c.strVal("contentMediaType"); cm != nil { + s.ContentMediaType = c.c.mediaTypes[*cm] + if s.ContentMediaType == nil { + s.ContentMediaType = mediaTypes[*cm] + } + } + } + + // annotations -- + s.Comment = c.string("$comment") + s.ReadOnly = c.boolean("readOnly") + s.WriteOnly = c.boolean("writeOnly") + if arr, ok := c.obj["examples"].([]any); ok { + s.Examples = arr + } + + return nil +} + +func (c *objCompiler) compileDraft2019(s *Schema) error { + var err error + + if c.hasVocab("core") { + if s.RecursiveRef, err = c.enqueueRef("$recursiveRef"); err != nil { + return err + } + s.RecursiveAnchor = c.boolean("$recursiveAnchor") + } + + if c.hasVocab("validation") { + if s.Contains != nil { + s.MinContains = c.intVal("minContains") + s.MaxContains = c.intVal("maxContains") + } + if m := c.objVal("dependentRequired"); m != nil { + s.DependentRequired = map[string][]string{} + for pname, pvalue := range m { + if arr, ok := pvalue.([]any); ok { + s.DependentRequired[pname] = toStrings(arr) + } + } + } + } + + if c.hasVocab("applicator") { + s.DependentSchemas = c.enqueueMap("dependentSchemas") + } + + var unevaluated bool + if s.DraftVersion == 2019 { + unevaluated = c.hasVocab("applicator") + } else { + unevaluated = c.hasVocab("unevaluated") + } + if unevaluated { + s.UnevaluatedItems = c.enqueueProp("unevaluatedItems") + s.UnevaluatedProperties = c.enqueueProp("unevaluatedProperties") + } + + if c.c.assertContent { + if s.ContentMediaType != nil && s.ContentMediaType.UnmarshalJSON != nil { + s.ContentSchema = c.enqueueProp("contentSchema") + } + } + + // annotations -- + s.Deprecated = c.boolean("deprecated") + + return nil +} + +func (c *objCompiler) compileDraft2020(s *Schema) error { + if c.hasVocab("core") { + sch, err := c.enqueueRef("$dynamicRef") + if err != nil { + return err + } + if sch != nil { + dref := c.strVal("$dynamicRef") + _, frag, err := splitFragment(*dref) + if err != nil { + return err + } + var anch string + if anchor, ok := 
frag.convert().(anchor); ok { + anch = string(anchor) + } + s.DynamicRef = &DynamicRef{sch, anch} + } + s.DynamicAnchor = c.string("$dynamicAnchor") + } + + if c.hasVocab("applicator") { + s.PrefixItems = c.enqueueArr("prefixItems") + s.Items2020 = c.enqueueProp("items") + } + + return nil +} + +// enqueue helpers -- + +func (c *objCompiler) enqueuePtr(ptr jsonPointer) *Schema { + up := urlPtr{c.up.url, ptr} + return c.c.enqueue(c.q, up) +} + +func (c *objCompiler) enqueueRef(pname string) (*Schema, error) { + ref := c.strVal(pname) + if ref == nil { + return nil, nil + } + baseURL := c.res.id + // baseURL := c.r.baseURL(c.up.ptr) + uf, err := baseURL.join(*ref) + if err != nil { + return nil, err + } + + up, err := c.r.resolve(*uf) + if err != nil { + return nil, err + } + if up != nil { + // local ref + return c.enqueuePtr(up.ptr), nil + } + + // remote ref + up_, err := c.c.roots.resolveFragment(*uf) + if err != nil { + return nil, err + } + return c.c.enqueue(c.q, up_), nil +} + +func (c *objCompiler) enqueueProp(pname string) *Schema { + if _, ok := c.obj[pname]; !ok { + return nil + } + ptr := c.up.ptr.append(pname) + return c.enqueuePtr(ptr) +} + +func (c *objCompiler) enqueueArr(pname string) []*Schema { + arr := c.arrVal(pname) + if arr == nil { + return nil + } + sch := make([]*Schema, len(arr)) + for i := range arr { + ptr := c.up.ptr.append2(pname, strconv.Itoa(i)) + sch[i] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueMap(pname string) map[string]*Schema { + obj := c.objVal(pname) + if obj == nil { + return nil + } + sch := make(map[string]*Schema) + for k := range obj { + ptr := c.up.ptr.append2(pname, k) + sch[k] = c.enqueuePtr(ptr) + } + return sch +} + +func (c *objCompiler) enqueueAdditional(pname string) any { + if b := c.boolVal(pname); b != nil { + return *b + } + if sch := c.enqueueProp(pname); sch != nil { + return sch + } + return nil +} + +// -- + +func (c *objCompiler) hasVocab(name string) bool { + return 
c.res.dialect.hasVocab(name) +} + +func (c *objCompiler) assertFormat(draftVersion int) bool { + if c.c.assertFormat || draftVersion < 2019 { + return true + } + if draftVersion == 2019 { + return c.hasVocab("format") + } else { + return c.hasVocab("format-assertion") + } +} + +// value helpers -- + +func (c *objCompiler) boolVal(pname string) *bool { + v, ok := c.obj[pname] + if !ok { + return nil + } + b, ok := v.(bool) + if !ok { + return nil + } + return &b +} + +func (c *objCompiler) boolean(pname string) bool { + b := c.boolVal(pname) + return b != nil && *b +} + +func (c *objCompiler) strVal(pname string) *string { + v, ok := c.obj[pname] + if !ok { + return nil + } + s, ok := v.(string) + if !ok { + return nil + } + return &s +} + +func (c *objCompiler) string(pname string) string { + if s := c.strVal(pname); s != nil { + return *s + } + return "" +} + +func (c *objCompiler) numVal(pname string) *big.Rat { + v, ok := c.obj[pname] + if !ok { + return nil + } + switch v.(type) { + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + if n, ok := new(big.Rat).SetString(fmt.Sprint(v)); ok { + return n + } + } + return nil +} + +func (c *objCompiler) intVal(pname string) *int { + if n := c.numVal(pname); n != nil && n.IsInt() { + n := int(n.Num().Int64()) + return &n + } + return nil +} + +func (c *objCompiler) objVal(pname string) map[string]any { + v, ok := c.obj[pname] + if !ok { + return nil + } + obj, ok := v.(map[string]any) + if !ok { + return nil + } + return obj +} + +func (c *objCompiler) arrVal(pname string) []any { + v, ok := c.obj[pname] + if !ok { + return nil + } + arr, ok := v.([]any) + if !ok { + return nil + } + return arr +} + +// -- + +type InvalidRegexError struct { + URL string + Regex string + Err error +} + +func (e *InvalidRegexError) Error() string { + return fmt.Sprintf("invalid regex %q at %q: %v", e.Regex, e.URL, e.Err) +} + +// -- + +func toStrings(arr []any) []string { + var 
strings []string + for _, item := range arr { + if s, ok := item.(string); ok { + strings = append(strings, s) + } + } + return strings +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go new file mode 100644 index 00000000..4995d7b8 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/output.go @@ -0,0 +1,212 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/language" + "golang.org/x/text/message" +) + +var defaultPrinter = message.NewPrinter(language.English) + +// format --- + +func (e *ValidationError) schemaURL() string { + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + return ref.URL + } else { + return e.SchemaURL + } +} + +func (e *ValidationError) absoluteKeywordLocation() string { + var schemaURL string + var keywordPath []string + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + schemaURL = ref.URL + keywordPath = nil + } else { + schemaURL = e.SchemaURL + keywordPath = e.ErrorKind.KeywordPath() + } + return fmt.Sprintf("%s%s", schemaURL, encode(jsonPtr(keywordPath))) +} + +func (e *ValidationError) skip() bool { + if len(e.Causes) == 1 { + _, ok := e.ErrorKind.(*kind.Reference) + return ok + } + return false +} + +func (e *ValidationError) display(sb *strings.Builder, verbose bool, indent int, absKwLoc string, p *message.Printer) { + if !e.skip() { + if indent > 0 { + sb.WriteByte('\n') + for i := 0; i < indent-1; i++ { + sb.WriteString(" ") + } + sb.WriteString("- ") + } + indent = indent + 1 + + prevAbsKwLoc := absKwLoc + absKwLoc = e.absoluteKeywordLocation() + + if _, ok := e.ErrorKind.(*kind.Schema); ok { + sb.WriteString(e.ErrorKind.LocalizedString(p)) + } else { + sb.WriteString(p.Sprintf("at %s", quote(jsonPtr(e.InstanceLocation)))) + if verbose { + schLoc := absKwLoc + if prevAbsKwLoc != "" { + pu, _ := split(prevAbsKwLoc) + u, f := 
split(absKwLoc) + if u == pu { + schLoc = fmt.Sprintf("S#%s", f) + } + } + fmt.Fprintf(sb, " [%s]", schLoc) + } + fmt.Fprintf(sb, ": %s", e.ErrorKind.LocalizedString(p)) + } + } + for _, cause := range e.Causes { + cause.display(sb, verbose, indent, absKwLoc, p) + } +} + +func (e *ValidationError) Error() string { + return e.LocalizedError(defaultPrinter) +} + +func (e *ValidationError) LocalizedError(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, false, 0, "", p) + return sb.String() +} + +func (e *ValidationError) GoString() string { + return e.LocalizedGoString(defaultPrinter) +} + +func (e *ValidationError) LocalizedGoString(p *message.Printer) string { + var sb strings.Builder + e.display(&sb, true, 0, "", p) + return sb.String() +} + +func jsonPtr(tokens []string) string { + var sb strings.Builder + for _, tok := range tokens { + sb.WriteByte('/') + sb.WriteString(escape(tok)) + } + return sb.String() +} + +// -- + +// Flag is output format with simple boolean property valid. +type FlagOutput struct { + Valid bool `json:"valid"` +} + +// The `Flag` output format, merely the boolean result. +func (e *ValidationError) FlagOutput() *FlagOutput { + return &FlagOutput{Valid: false} +} + +// -- + +type OutputUnit struct { + Valid bool `json:"valid"` + KeywordLocation string `json:"keywordLocation"` + AbsoluteKeywordLocation string `json:"AbsoluteKeywordLocation,omitempty"` + InstanceLocation string `json:"instanceLocation"` + Error *OutputError `json:"error,omitempty"` + Errors []OutputUnit `json:"errors,omitempty"` +} + +type OutputError struct { + Kind ErrorKind + p *message.Printer +} + +func (k OutputError) MarshalJSON() ([]byte, error) { + return json.Marshal(k.Kind.LocalizedString(k.p)) +} + +// The `Basic` structure, a flat list of output units. 
+func (e *ValidationError) BasicOutput() *OutputUnit { + return e.LocalizedBasicOutput(defaultPrinter) +} + +func (e *ValidationError) LocalizedBasicOutput(p *message.Printer) *OutputUnit { + out := e.output(true, false, "", "", p) + return &out +} + +// The `Detailed` structure, based on the schema. +func (e *ValidationError) DetailedOutput() *OutputUnit { + return e.LocalizedDetailedOutput(defaultPrinter) +} + +func (e *ValidationError) LocalizedDetailedOutput(p *message.Printer) *OutputUnit { + out := e.output(false, false, "", "", p) + return &out +} + +func (e *ValidationError) output(flatten, inRef bool, schemaURL, kwLoc string, p *message.Printer) OutputUnit { + if !inRef { + if _, ok := e.ErrorKind.(*kind.Reference); ok { + inRef = true + } + } + if schemaURL != "" { + kwLoc += e.SchemaURL[len(schemaURL):] + if ref, ok := e.ErrorKind.(*kind.Reference); ok { + kwLoc += jsonPtr(ref.KeywordPath()) + } + } + schemaURL = e.schemaURL() + + keywordLocation := kwLoc + if _, ok := e.ErrorKind.(*kind.Reference); !ok { + keywordLocation += jsonPtr(e.ErrorKind.KeywordPath()) + } + + out := OutputUnit{ + Valid: false, + InstanceLocation: jsonPtr(e.InstanceLocation), + KeywordLocation: keywordLocation, + } + if inRef { + out.AbsoluteKeywordLocation = e.absoluteKeywordLocation() + } + for _, cause := range e.Causes { + causeOut := cause.output(flatten, inRef, schemaURL, kwLoc, p) + if cause.skip() { + causeOut = causeOut.Errors[0] + } + if flatten { + errors := causeOut.Errors + causeOut.Errors = nil + causeOut.Error = &OutputError{cause.ErrorKind, p} + out.Errors = append(out.Errors, causeOut) + if len(errors) > 0 { + out.Errors = append(out.Errors, errors...) 
+ } + } else { + out.Errors = append(out.Errors, causeOut) + } + } + if len(out.Errors) == 0 { + out.Error = &OutputError{e.ErrorKind, p} + } + return out +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go new file mode 100644 index 00000000..576a2a47 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/position.go @@ -0,0 +1,142 @@ +package jsonschema + +import ( + "strconv" + "strings" +) + +// Position tells possible tokens in json. +type Position interface { + collect(v any, ptr jsonPointer) map[jsonPointer]any +} + +// -- + +type AllProp struct{} + +func (AllProp) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for pname, pvalue := range obj { + m[ptr.append(pname)] = pvalue + } + return m +} + +// -- + +type AllItem struct{} + +func (AllItem) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + m := map[jsonPointer]any{} + for i, item := range arr { + m[ptr.append(strconv.Itoa(i))] = item + } + return m +} + +// -- + +type Prop string + +func (p Prop) collect(v any, ptr jsonPointer) map[jsonPointer]any { + obj, ok := v.(map[string]any) + if !ok { + return nil + } + pvalue, ok := obj[string(p)] + if !ok { + return nil + } + return map[jsonPointer]any{ + ptr.append(string(p)): pvalue, + } +} + +// -- + +type Item int + +func (i Item) collect(v any, ptr jsonPointer) map[jsonPointer]any { + arr, ok := v.([]any) + if !ok { + return nil + } + if i < 0 || int(i) >= len(arr) { + return nil + } + return map[jsonPointer]any{ + ptr.append(strconv.Itoa(int(i))): arr[int(i)], + } +} + +// -- + +// SchemaPath tells where to look for subschema inside keyword. 
+type SchemaPath []Position + +func schemaPath(path string) SchemaPath { + var sp SchemaPath + for _, tok := range strings.Split(path, "/") { + var pos Position + switch tok { + case "*": + pos = AllProp{} + case "[]": + pos = AllItem{} + default: + if i, err := strconv.Atoi(tok); err == nil { + pos = Item(i) + } else { + pos = Prop(tok) + } + } + sp = append(sp, pos) + } + return sp +} + +func (sp SchemaPath) collect(v any, ptr jsonPointer) map[jsonPointer]any { + if len(sp) == 0 { + return map[jsonPointer]any{ + ptr: v, + } + } + p, sp := sp[0], sp[1:] + m := p.collect(v, ptr) + mm := map[jsonPointer]any{} + for ptr, v := range m { + m = sp.collect(v, ptr) + for k, v := range m { + mm[k] = v + } + } + return mm +} + +func (sp SchemaPath) String() string { + var sb strings.Builder + for _, pos := range sp { + if sb.Len() != 0 { + sb.WriteByte('/') + } + switch pos := pos.(type) { + case AllProp: + sb.WriteString("*") + case AllItem: + sb.WriteString("[]") + case Prop: + sb.WriteString(string(pos)) + case Item: + sb.WriteString(strconv.Itoa(int(pos))) + } + } + return sb.String() +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go new file mode 100644 index 00000000..86069010 --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/root.go @@ -0,0 +1,202 @@ +package jsonschema + +import ( + "fmt" + "slices" + "strings" +) + +type root struct { + url url + doc any + resources map[jsonPointer]*resource + subschemasProcessed map[jsonPointer]struct{} +} + +func (r *root) rootResource() *resource { + return r.resources[""] +} + +func (r *root) resource(ptr jsonPointer) *resource { + for { + if res, ok := r.resources[ptr]; ok { + return res + } + slash := strings.LastIndexByte(string(ptr), '/') + if slash == -1 { + break + } + ptr = ptr[:slash] + } + return r.rootResource() +} + +func (r *root) resolveFragmentIn(frag fragment, res *resource) (urlPtr, error) { + var ptr jsonPointer + 
switch f := frag.convert().(type) { + case jsonPointer: + ptr = res.ptr.concat(f) + case anchor: + aptr, ok := res.anchors[f] + if !ok { + return urlPtr{}, &AnchorNotFoundError{ + URL: r.url.String(), + Reference: (&urlFrag{res.id, frag}).String(), + } + } + ptr = aptr + } + return urlPtr{r.url, ptr}, nil +} + +func (r *root) resolveFragment(frag fragment) (urlPtr, error) { + return r.resolveFragmentIn(frag, r.rootResource()) +} + +// resovles urlFrag to urlPtr from root. +// returns nil if it is external. +func (r *root) resolve(uf urlFrag) (*urlPtr, error) { + var res *resource + if uf.url == r.url { + res = r.rootResource() + } else { + // look for resource with id==uf.url + for _, v := range r.resources { + if v.id == uf.url { + res = v + break + } + } + if res == nil { + return nil, nil // external url + } + } + up, err := r.resolveFragmentIn(uf.frag, res) + return &up, err +} + +func (r *root) collectAnchors(sch any, schPtr jsonPointer, res *resource) error { + obj, ok := sch.(map[string]any) + if !ok { + return nil + } + + addAnchor := func(anchor anchor) error { + ptr1, ok := res.anchors[anchor] + if ok { + if ptr1 == schPtr { + // anchor with same root_ptr already exists + return nil + } + return &DuplicateAnchorError{ + string(anchor), r.url.String(), string(ptr1), string(schPtr), + } + } + res.anchors[anchor] = schPtr + return nil + } + + if res.dialect.draft.version < 2019 { + if _, ok := obj["$ref"]; ok { + // All other properties in a "$ref" object MUST be ignored + return nil + } + // anchor is specified in id + if id, ok := strVal(obj, res.dialect.draft.id); ok { + _, frag, err := splitFragment(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseAnchorError{loc.String()} + } + if anchor, ok := frag.convert().(anchor); ok { + if err := addAnchor(anchor); err != nil { + return err + } + } + } + } + if res.dialect.draft.version >= 2019 { + if s, ok := strVal(obj, "$anchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return 
err + } + } + } + if res.dialect.draft.version >= 2020 { + if s, ok := strVal(obj, "$dynamicAnchor"); ok { + if err := addAnchor(anchor(s)); err != nil { + return err + } + res.dynamicAnchors = append(res.dynamicAnchors, anchor(s)) + } + } + + return nil +} + +func (r *root) clone() *root { + processed := map[jsonPointer]struct{}{} + for k := range r.subschemasProcessed { + processed[k] = struct{}{} + } + resources := map[jsonPointer]*resource{} + for k, v := range r.resources { + resources[k] = v.clone() + } + return &root{ + url: r.url, + doc: r.doc, + resources: resources, + subschemasProcessed: processed, + } +} + +// -- + +type resource struct { + ptr jsonPointer + id url + dialect dialect + anchors map[anchor]jsonPointer + dynamicAnchors []anchor +} + +func newResource(ptr jsonPointer, id url) *resource { + return &resource{ptr: ptr, id: id, anchors: make(map[anchor]jsonPointer)} +} + +func (res *resource) clone() *resource { + anchors := map[anchor]jsonPointer{} + for k, v := range res.anchors { + anchors[k] = v + } + return &resource{ + ptr: res.ptr, + id: res.id, + dialect: res.dialect, + anchors: anchors, + dynamicAnchors: slices.Clone(res.dynamicAnchors), + } +} + +//-- + +type UnsupportedVocabularyError struct { + URL string + Vocabulary string +} + +func (e *UnsupportedVocabularyError) Error() string { + return fmt.Sprintf("unsupported vocabulary %q in %q", e.Vocabulary, e.URL) +} + +// -- + +type AnchorNotFoundError struct { + URL string + Reference string +} + +func (e *AnchorNotFoundError) Error() string { + return fmt.Sprintf("anchor in %q not found in schema %q", e.Reference, e.URL) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go new file mode 100644 index 00000000..b9b79baa --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/roots.go @@ -0,0 +1,289 @@ +package jsonschema + +import ( + "fmt" + "strings" +) + +type roots struct { + defaultDraft 
*Draft + roots map[url]*root + loader defaultLoader + regexpEngine RegexpEngine + vocabularies map[string]*Vocabulary + assertVocabs bool +} + +func newRoots() *roots { + return &roots{ + defaultDraft: draftLatest, + roots: map[url]*root{}, + loader: defaultLoader{ + docs: map[url]any{}, + loader: FileLoader{}, + }, + regexpEngine: goRegexpCompile, + vocabularies: map[string]*Vocabulary{}, + } +} + +func (rr *roots) orLoad(u url) (*root, error) { + if r, ok := rr.roots[u]; ok { + return r, nil + } + doc, err := rr.loader.load(u) + if err != nil { + return nil, err + } + return rr.addRoot(u, doc) +} + +func (rr *roots) addRoot(u url, doc any) (*root, error) { + r := &root{ + url: u, + doc: doc, + resources: map[jsonPointer]*resource{}, + subschemasProcessed: map[jsonPointer]struct{}{}, + } + if err := rr.collectResources(r, doc, u, "", dialect{rr.defaultDraft, nil}); err != nil { + return nil, err + } + if !strings.HasPrefix(u.String(), "http://json-schema.org/") && + !strings.HasPrefix(u.String(), "https://json-schema.org/") { + if err := rr.validate(r, doc, ""); err != nil { + return nil, err + } + } + + rr.roots[u] = r + return r, nil +} + +func (rr *roots) resolveFragment(uf urlFrag) (urlPtr, error) { + r, err := rr.orLoad(uf.url) + if err != nil { + return urlPtr{}, err + } + return r.resolveFragment(uf.frag) +} + +func (rr *roots) collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + if _, ok := r.subschemasProcessed[schPtr]; ok { + return nil + } + if err := rr._collectResources(r, sch, base, schPtr, fallback); err != nil { + return err + } + r.subschemasProcessed[schPtr] = struct{}{} + return nil +} + +func (rr *roots) _collectResources(r *root, sch any, base url, schPtr jsonPointer, fallback dialect) error { + if _, ok := sch.(bool); ok { + if schPtr.isEmpty() { + // root resource + res := newResource(schPtr, base) + res.dialect = fallback + r.resources[schPtr] = res + } + return nil + } + obj, ok := 
sch.(map[string]any) + if !ok { + return nil + } + + hasSchema := false + if sch, ok := obj["$schema"]; ok { + if _, ok := sch.(string); ok { + hasSchema = true + } + } + + draft, err := rr.loader.getDraft(urlPtr{r.url, schPtr}, sch, fallback.draft, map[url]struct{}{}) + if err != nil { + return err + } + id := draft.getID(obj) + if id == "" && !schPtr.isEmpty() { + // ignore $schema + draft = fallback.draft + hasSchema = false + id = draft.getID(obj) + } + + var res *resource + if id != "" { + uf, err := base.join(id) + if err != nil { + loc := urlPtr{r.url, schPtr} + return &ParseIDError{loc.String()} + } + base = uf.url + res = newResource(schPtr, base) + } else if schPtr.isEmpty() { + // root resource + res = newResource(schPtr, base) + } + + if res != nil { + found := false + for _, res := range r.resources { + if res.id == base { + found = true + if res.ptr != schPtr { + return &DuplicateIDError{base.String(), r.url.String(), string(schPtr), string(res.ptr)} + } + } + } + if !found { + if hasSchema { + vocabs, err := rr.loader.getMetaVocabs(sch, draft, rr.vocabularies) + if err != nil { + return err + } + res.dialect = dialect{draft, vocabs} + } else { + res.dialect = fallback + } + r.resources[schPtr] = res + } + } + + var baseRes *resource + for _, res := range r.resources { + if res.id == base { + baseRes = res + break + } + } + if baseRes == nil { + panic("baseres is nil") + } + + // found base resource + if err := r.collectAnchors(sch, schPtr, baseRes); err != nil { + return err + } + + // process subschemas + subschemas := map[jsonPointer]any{} + for _, sp := range draft.subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + for _, vocab := range baseRes.dialect.activeVocabs(true, rr.vocabularies) { + if v := rr.vocabularies[vocab]; v != nil { + for _, sp := range v.Subschemas { + ss := sp.collect(obj, schPtr) + for k, v := range ss { + subschemas[k] = v + } + } + } + } + for ptr, v := range subschemas { + if 
err := rr.collectResources(r, v, base, ptr, baseRes.dialect); err != nil { + return err + } + } + + return nil +} + +func (rr *roots) ensureSubschema(up urlPtr) error { + r, err := rr.orLoad(up.url) + if err != nil { + return err + } + if _, ok := r.subschemasProcessed[up.ptr]; ok { + return nil + } + v, err := up.lookup(r.doc) + if err != nil { + return err + } + rClone := r.clone() + if err := rr.addSubschema(rClone, up.ptr); err != nil { + return err + } + if err := rr.validate(rClone, v, up.ptr); err != nil { + return err + } + rr.roots[r.url] = rClone + return nil +} + +func (rr *roots) addSubschema(r *root, ptr jsonPointer) error { + v, err := (&urlPtr{r.url, ptr}).lookup(r.doc) + if err != nil { + return err + } + base := r.resource(ptr) + baseURL := base.id + if err := rr.collectResources(r, v, baseURL, ptr, base.dialect); err != nil { + return err + } + + // collect anchors + if _, ok := r.resources[ptr]; !ok { + res := r.resource(ptr) + if err := r.collectAnchors(v, ptr, res); err != nil { + return err + } + } + return nil +} + +func (rr *roots) validate(r *root, v any, ptr jsonPointer) error { + dialect := r.resource(ptr).dialect + meta := dialect.getSchema(rr.assertVocabs, rr.vocabularies) + if err := meta.validate(v, rr.regexpEngine, meta, r.resources, rr.assertVocabs, rr.vocabularies); err != nil { + up := urlPtr{r.url, ptr} + return &SchemaValidationError{URL: up.String(), Err: err} + } + return nil +} + +// -- + +type InvalidMetaSchemaURLError struct { + URL string + Err error +} + +func (e *InvalidMetaSchemaURLError) Error() string { + return fmt.Sprintf("invalid $schema in %q: %v", e.URL, e.Err) +} + +// -- + +type UnsupportedDraftError struct { + URL string +} + +func (e *UnsupportedDraftError) Error() string { + return fmt.Sprintf("draft %q is not supported", e.URL) +} + +// -- + +type MetaSchemaCycleError struct { + URL string +} + +func (e *MetaSchemaCycleError) Error() string { + return fmt.Sprintf("cycle in resolving $schema in %q", e.URL) 
+} + +// -- + +type MetaSchemaMismatchError struct { + URL string +} + +func (e *MetaSchemaMismatchError) Error() string { + return fmt.Sprintf("$schema in %q does not match with $schema in root", e.URL) +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go new file mode 100644 index 00000000..a970311f --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/schema.go @@ -0,0 +1,248 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" +) + +// Schema is the regpresentation of a compiled +// jsonschema. +type Schema struct { + up urlPtr + resource *Schema + dynamicAnchors map[string]*Schema + allPropsEvaluated bool + allItemsEvaluated bool + numItemsEvaluated int + + DraftVersion int + Location string + + // type agnostic -- + Bool *bool // boolean schema + ID string + Ref *Schema + Anchor string + RecursiveRef *Schema + RecursiveAnchor bool + DynamicRef *DynamicRef + DynamicAnchor string // "" if not specified + Types *Types + Enum *Enum + Const *any + Not *Schema + AllOf []*Schema + AnyOf []*Schema + OneOf []*Schema + If *Schema + Then *Schema + Else *Schema + Format *Format + + // object -- + MaxProperties *int + MinProperties *int + Required []string + PropertyNames *Schema + Properties map[string]*Schema + PatternProperties map[Regexp]*Schema + AdditionalProperties any // nil or bool or *Schema + Dependencies map[string]any // value is []string or *Schema + DependentRequired map[string][]string + DependentSchemas map[string]*Schema + UnevaluatedProperties *Schema + + // array -- + MinItems *int + MaxItems *int + UniqueItems bool + Contains *Schema + MinContains *int + MaxContains *int + Items any // nil or []*Schema or *Schema + AdditionalItems any // nil or bool or *Schema + PrefixItems []*Schema + Items2020 *Schema + UnevaluatedItems *Schema + + // string -- + MinLength *int + MaxLength *int + Pattern Regexp + ContentEncoding *Decoder + 
ContentMediaType *MediaType + ContentSchema *Schema + + // number -- + Maximum *big.Rat + Minimum *big.Rat + ExclusiveMaximum *big.Rat + ExclusiveMinimum *big.Rat + MultipleOf *big.Rat + + Extensions []SchemaExt + + // annotations -- + Title string + Description string + Default *any + Comment string + ReadOnly bool + WriteOnly bool + Examples []any + Deprecated bool +} + +// -- + +type jsonType int + +const ( + invalidType jsonType = 0 + nullType jsonType = 1 << iota + booleanType + numberType + integerType + stringType + arrayType + objectType +) + +func typeOf(v any) jsonType { + switch v.(type) { + case nil: + return nullType + case bool: + return booleanType + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + return numberType + case string: + return stringType + case []any: + return arrayType + case map[string]any: + return objectType + default: + return invalidType + } +} + +func typeFromString(s string) jsonType { + switch s { + case "null": + return nullType + case "boolean": + return booleanType + case "number": + return numberType + case "integer": + return integerType + case "string": + return stringType + case "array": + return arrayType + case "object": + return objectType + } + return invalidType +} + +func (jt jsonType) String() string { + switch jt { + case nullType: + return "null" + case booleanType: + return "boolean" + case numberType: + return "number" + case integerType: + return "integer" + case stringType: + return "string" + case arrayType: + return "array" + case objectType: + return "object" + } + return "" +} + +// -- + +// Types encapsulates list of json value types. 
+type Types int
+
+// newTypes converts a JSON Schema "type" keyword value — a string or an
+// array of strings — into a Types bitset; returns nil if no valid type
+// name was found.
+func newTypes(v any) *Types {
+	var types Types
+	switch v := v.(type) {
+	case string:
+		types.add(typeFromString(v))
+	case []any:
+		for _, item := range v {
+			if s, ok := item.(string); ok {
+				types.add(typeFromString(s))
+			}
+		}
+	}
+	if types.IsEmpty() {
+		return nil
+	}
+	return &types
+}
+
+// IsEmpty reports whether no type bits are set.
+func (tt Types) IsEmpty() bool {
+	return tt == 0
+}
+
+func (tt *Types) add(t jsonType) {
+	*tt = Types(int(*tt) | int(t))
+}
+
+func (tt Types) contains(t jsonType) bool {
+	return int(tt)&int(t) != 0
+}
+
+// ToStrings returns the names of the contained types in canonical order.
+func (tt Types) ToStrings() []string {
+	types := []jsonType{
+		nullType, booleanType, numberType, integerType,
+		stringType, arrayType, objectType,
+	}
+	var arr []string
+	for _, t := range types {
+		if tt.contains(t) {
+			arr = append(arr, t.String())
+		}
+	}
+	return arr
+}
+
+func (tt Types) String() string {
+	return fmt.Sprintf("%v", tt.ToStrings())
+}
+
+// --
+
+// Enum holds the "enum" keyword values together with the set of json
+// types occurring in them (used to short-circuit matching).
+type Enum struct {
+	Values []any
+	types  Types
+}
+
+func newEnum(arr []any) *Enum {
+	var types Types
+	for _, item := range arr {
+		types.add(typeOf(item))
+	}
+	return &Enum{arr, types}
+}
+
+// --
+
+type DynamicRef struct {
+	Ref    *Schema
+	Anchor string // "" if not specified
+}
+
+func newSchema(up urlPtr) *Schema {
+	return &Schema{up: up, Location: up.String()}
+}
diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go
new file mode 100644
index 00000000..c6f8e775
--- /dev/null
+++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/util.go
@@ -0,0 +1,464 @@
+package jsonschema
+
+import (
+	"encoding/json"
+	"fmt"
+	"hash/maphash"
+	"math/big"
+	gourl "net/url"
+	"path/filepath"
+	"runtime"
+	"slices"
+	"strconv"
+	"strings"
+
+	"github.com/santhosh-tekuri/jsonschema/v6/kind"
+	"golang.org/x/text/message"
+)
+
+// --
+
+// url is a URL kept as a plain string; join resolves references
+// against it.
+type url (string)
+
+func (u url) String() string {
+	return string(u)
+}
+
+// join resolves ref (which may carry a fragment) against u and returns
+// the resolved url plus the decoded fragment.
+func (u url) join(ref string) (*urlFrag, error) {
+	base, err := gourl.Parse(string(u))
+	if err != nil {
+		return nil, &ParseURLError{URL: u.String(), Err: err}
+	}
+
+	ref, frag, err := splitFragment(ref)
+	if err != nil {
+		return nil, err
+	}
+	refURL, err := gourl.Parse(ref)
+	if err != nil {
+		return nil, &ParseURLError{URL: ref, Err: err}
+	}
+	resolved := base.ResolveReference(refURL)
+
+	// see https://github.com/golang/go/issues/66084 (net/url: ResolveReference ignores Opaque value)
+	if !refURL.IsAbs() && base.Opaque != "" {
+		resolved.Opaque = base.Opaque
+	}
+
+	return &urlFrag{url: url(resolved.String()), frag: frag}, nil
+}
+
+// --
+
+type jsonPointer string
+
+// escape applies JSON Pointer token escaping: "~" -> "~0", "/" -> "~1".
+func escape(tok string) string {
+	tok = strings.ReplaceAll(tok, "~", "~0")
+	tok = strings.ReplaceAll(tok, "/", "~1")
+	return tok
+}
+
+// unescape reverses JSON Pointer token escaping ("~0" -> "~", "~1" -> "/");
+// the second result is false for a malformed escape sequence.
+func unescape(tok string) (string, bool) {
+	tilde := strings.IndexByte(tok, '~')
+	if tilde == -1 {
+		return tok, true
+	}
+	sb := new(strings.Builder)
+	for {
+		sb.WriteString(tok[:tilde])
+		tok = tok[tilde+1:]
+		if tok == "" {
+			return "", false
+		}
+		switch tok[0] {
+		case '0':
+			sb.WriteByte('~')
+		case '1':
+			sb.WriteByte('/')
+		default:
+			return "", false
+		}
+		tok = tok[1:]
+		tilde = strings.IndexByte(tok, '~')
+		if tilde == -1 {
+			sb.WriteString(tok)
+			break
+		}
+	}
+	return sb.String(), true
+}
+
+func (ptr jsonPointer) isEmpty() bool {
+	return string(ptr) == ""
+}
+
+func (ptr jsonPointer) concat(next jsonPointer) jsonPointer {
+	return jsonPointer(fmt.Sprintf("%s%s", ptr, next))
+}
+
+func (ptr jsonPointer) append(tok string) jsonPointer {
+	return jsonPointer(fmt.Sprintf("%s/%s", ptr, escape(tok)))
+}
+
+func (ptr jsonPointer) append2(tok1, tok2 string) jsonPointer {
+	return jsonPointer(fmt.Sprintf("%s/%s/%s", ptr, escape(tok1), escape(tok2)))
+}
+
+// --
+
+type anchor string
+
+// --
+
+type fragment string
+
+// decode percent-decodes a URL fragment.
+func decode(frag string) (string, error) {
+	return gourl.PathUnescape(frag)
+}
+
+// avoids escaping /.
+func encode(frag string) string {
+	var sb strings.Builder
+	for i, tok := range strings.Split(frag, "/") {
+		if i > 0 {
+			sb.WriteByte('/')
+		}
+		sb.WriteString(gourl.PathEscape(tok))
+	}
+	return sb.String()
+}
+
+// splitFragment splits str at '#' and percent-decodes the fragment part.
+func splitFragment(str string) (string, fragment, error) {
+	u, f := split(str)
+	f, err := decode(f)
+	if err != nil {
+		return "", fragment(""), &ParseURLError{URL: str, Err: err}
+	}
+	return u, fragment(f), nil
+}
+
+// split returns the parts of str before and after the first '#';
+// the second result is "" when there is no '#'.
+func split(str string) (string, string) {
+	hash := strings.IndexByte(str, '#')
+	if hash == -1 {
+		return str, ""
+	}
+	return str[:hash], str[hash+1:]
+}
+
+// convert classifies the fragment as a jsonPointer (empty or starting
+// with '/') or an anchor (anything else).
+func (frag fragment) convert() any {
+	str := string(frag)
+	if str == "" || strings.HasPrefix(str, "/") {
+		return jsonPointer(str)
+	}
+	return anchor(str)
+}
+
+// --
+
+type urlFrag struct {
+	url  url
+	frag fragment
+}
+
+func startsWithWindowsDrive(s string) bool {
+	if s != "" && strings.HasPrefix(s[1:], `:\`) {
+		return (s[0] >= 'a' && s[0] <= 'z') || (s[0] >= 'A' && s[0] <= 'Z')
+	}
+	return false
+}
+
+// absolute resolves input to an absolute URL (converting local filesystem
+// paths to file:// URLs) plus its decoded fragment.
+func absolute(input string) (*urlFrag, error) {
+	u, frag, err := splitFragment(input)
+	if err != nil {
+		return nil, err
+	}
+
+	// if windows absolute file path, convert to file url
+	// because: net/url parses drive name as scheme
+	if runtime.GOOS == "windows" && startsWithWindowsDrive(u) {
+		u = "file:///" + filepath.ToSlash(u)
+	}
+
+	gourl, err := gourl.Parse(u)
+	if err != nil {
+		return nil, &ParseURLError{URL: input, Err: err}
+	}
+	if gourl.IsAbs() {
+		return &urlFrag{url(u), frag}, nil
+	}
+
+	// avoid filesystem api in wasm
+	if runtime.GOOS != "js" {
+		abs, err := filepath.Abs(u)
+		if err != nil {
+			return nil, &ParseURLError{URL: input, Err: err}
+		}
+		u = abs
+	}
+	if !strings.HasPrefix(u, "/") {
+		u = "/" + u
+	}
+	u = "file://" + filepath.ToSlash(u)
+
+	_, err = gourl.Parse(u)
+	if err != nil {
+		return nil, &ParseURLError{URL: input, Err: err}
+	}
+	return &urlFrag{url: url(u), frag: frag}, nil
+}
+
+func (uf *urlFrag) String() string {
+	return fmt.Sprintf("%s#%s", uf.url, encode(string(uf.frag)))
+}
+
+// --
+
+type urlPtr struct {
+	url url
+	ptr jsonPointer
+}
+
+// lookup evaluates the JSON Pointer up.ptr against v and returns the
+// referenced value; errors distinguish a malformed pointer token from a
+// token that does not resolve.
+func (up *urlPtr) lookup(v any) (any, error) {
+	for _, tok := range strings.Split(string(up.ptr), "/")[1:] {
+		tok, ok := unescape(tok)
+		if !ok {
+			return nil, &InvalidJsonPointerError{up.String()}
+		}
+		switch val := v.(type) {
+		case map[string]any:
+			if pvalue, ok := val[tok]; ok {
+				v = pvalue
+				continue
+			}
+		case []any:
+			if index, err := strconv.Atoi(tok); err == nil {
+				if index >= 0 && index < len(val) {
+					v = val[index]
+					continue
+				}
+			}
+		}
+		return nil, &JSONPointerNotFoundError{up.String()}
+	}
+	return v, nil
+}
+
+// format renders up with tok appended to the pointer.
+func (up *urlPtr) format(tok string) string {
+	return fmt.Sprintf("%s#%s/%s", up.url, encode(string(up.ptr)), encode(escape(tok)))
+}
+
+func (up *urlPtr) String() string {
+	return fmt.Sprintf("%s#%s", up.url, encode(string(up.ptr)))
+}
+
+// --
+
+func minInt(i, j int) int {
+	if i < j {
+		return i
+	}
+	return j
+}
+
+// strVal returns obj[prop] when it exists and is a string.
+func strVal(obj map[string]any, prop string) (string, bool) {
+	v, ok := obj[prop]
+	if !ok {
+		return "", false
+	}
+	s, ok := v.(string)
+	return s, ok
+}
+
+// isInteger reports whether num (any numeric representation) has an
+// integral value.
+func isInteger(num any) bool {
+	rat, ok := new(big.Rat).SetString(fmt.Sprint(num))
+	return ok && rat.IsInt()
+}
+
+// quote returns single-quoted string.
+// used for embedding quoted strings in json.
+func quote(s string) string { + s = fmt.Sprintf("%q", s) + s = strings.ReplaceAll(s, `\"`, `"`) + s = strings.ReplaceAll(s, `'`, `\'`) + return "'" + s[1:len(s)-1] + "'" +} + +func equals(v1, v2 any) (bool, ErrorKind) { + switch v1 := v1.(type) { + case map[string]any: + v2, ok := v2.(map[string]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for k, val1 := range v1 { + val2, ok := v2[k] + if !ok { + return false, nil + } + if ok, k := equals(val1, val2); !ok || k != nil { + return ok, k + } + } + return true, nil + case []any: + v2, ok := v2.([]any) + if !ok || len(v1) != len(v2) { + return false, nil + } + for i := range v1 { + if ok, k := equals(v1[i], v2[i]); !ok || k != nil { + return ok, k + } + } + return true, nil + case nil: + return v2 == nil, nil + case bool: + v2, ok := v2.(bool) + return ok && v1 == v2, nil + case string: + v2, ok := v2.(string) + return ok && v1 == v2, nil + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + num1, ok1 := new(big.Rat).SetString(fmt.Sprint(v1)) + num2, ok2 := new(big.Rat).SetString(fmt.Sprint(v2)) + return ok1 && ok2 && num1.Cmp(num2) == 0, nil + default: + return false, &kind.InvalidJsonValue{Value: v1} + } +} + +func duplicates(arr []any) (int, int, ErrorKind) { + if len(arr) <= 20 { + for i := 1; i < len(arr); i++ { + for j := 0; j < i; j++ { + if ok, k := equals(arr[i], arr[j]); ok || k != nil { + return j, i, k + } + } + } + return -1, -1, nil + } + + m := make(map[uint64][]int) + h := new(maphash.Hash) + for i, item := range arr { + h.Reset() + writeHash(item, h) + hash := h.Sum64() + indexes, ok := m[hash] + if ok { + for _, j := range indexes { + if ok, k := equals(item, arr[j]); ok || k != nil { + return j, i, k + } + } + } + indexes = append(indexes, i) + m[hash] = indexes + } + return -1, -1, nil +} + +func writeHash(v any, h *maphash.Hash) ErrorKind { + switch v := v.(type) { + case map[string]any: + _ = h.WriteByte(0) + props := 
make([]string, 0, len(v)) + for prop := range v { + props = append(props, prop) + } + slices.Sort(props) + for _, prop := range props { + writeHash(prop, h) + writeHash(v[prop], h) + } + case []any: + _ = h.WriteByte(1) + for _, item := range v { + writeHash(item, h) + } + case nil: + _ = h.WriteByte(2) + case bool: + _ = h.WriteByte(3) + if v { + _ = h.WriteByte(1) + } else { + _ = h.WriteByte(0) + } + case string: + _ = h.WriteByte(4) + _, _ = h.WriteString(v) + case json.Number, float32, float64, int, int8, int16, int32, int64, uint, uint8, uint16, uint32, uint64: + _ = h.WriteByte(5) + num, _ := new(big.Rat).SetString(fmt.Sprint(v)) + _, _ = h.Write(num.Num().Bytes()) + _, _ = h.Write(num.Denom().Bytes()) + default: + return &kind.InvalidJsonValue{Value: v} + } + return nil +} + +// -- + +type ParseURLError struct { + URL string + Err error +} + +func (e *ParseURLError) Error() string { + return fmt.Sprintf("error in parsing %q: %v", e.URL, e.Err) +} + +// -- + +type InvalidJsonPointerError struct { + URL string +} + +func (e *InvalidJsonPointerError) Error() string { + return fmt.Sprintf("invalid json-pointer %q", e.URL) +} + +// -- + +type JSONPointerNotFoundError struct { + URL string +} + +func (e *JSONPointerNotFoundError) Error() string { + return fmt.Sprintf("json-pointer in %q not found", e.URL) +} + +// -- + +type SchemaValidationError struct { + URL string + Err error +} + +func (e *SchemaValidationError) Error() string { + return fmt.Sprintf("%q is not valid against metaschema: %v", e.URL, e.Err) +} + +// -- + +// LocalizableError is an error whose message is localizable. +func LocalizableError(format string, args ...any) error { + return &localizableError{format, args} +} + +type localizableError struct { + msg string + args []any +} + +func (e *localizableError) Error() string { + return fmt.Sprintf(e.msg, e.args...) +} + +func (e *localizableError) LocalizedError(p *message.Printer) string { + return p.Sprintf(e.msg, e.args...) 
+} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go new file mode 100644 index 00000000..e2ace37a --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/validator.go @@ -0,0 +1,975 @@ +package jsonschema + +import ( + "encoding/json" + "fmt" + "math/big" + "slices" + "strconv" + "unicode/utf8" + + "github.com/santhosh-tekuri/jsonschema/v6/kind" + "golang.org/x/text/message" +) + +func (sch *Schema) Validate(v any) error { + return sch.validate(v, nil, nil, nil, false, nil) +} + +func (sch *Schema) validate(v any, regexpEngine RegexpEngine, meta *Schema, resources map[jsonPointer]*resource, assertVocabs bool, vocabularies map[string]*Vocabulary) error { + vd := validator{ + v: v, + vloc: make([]string, 0, 8), + sch: sch, + scp: &scope{sch, "", 0, nil}, + uneval: unevalFrom(v, sch, false), + errors: nil, + boolResult: false, + regexpEngine: regexpEngine, + meta: meta, + resources: resources, + assertVocabs: assertVocabs, + vocabularies: vocabularies, + } + if _, err := vd.validate(); err != nil { + verr := err.(*ValidationError) + var causes []*ValidationError + if _, ok := verr.ErrorKind.(*kind.Group); ok { + causes = verr.Causes + } else { + causes = []*ValidationError{verr} + } + return &ValidationError{ + SchemaURL: sch.Location, + InstanceLocation: nil, + ErrorKind: &kind.Schema{Location: sch.Location}, + Causes: causes, + } + } + + return nil +} + +type validator struct { + v any + vloc []string + sch *Schema + scp *scope + uneval *uneval + errors []*ValidationError + boolResult bool // is interested to know valid or not (but not actuall error) + regexpEngine RegexpEngine + + // meta validation + meta *Schema // set only when validating with metaschema + resources map[jsonPointer]*resource // resources which should be validated with their dialect + assertVocabs bool + vocabularies map[string]*Vocabulary +} + +func (vd *validator) validate() (*uneval, error) { + 
s := vd.sch + v := vd.v + + // boolean -- + if s.Bool != nil { + if *s.Bool { + return vd.uneval, nil + } else { + return nil, vd.error(&kind.FalseSchema{}) + } + } + + // check cycle -- + if scp := vd.scp.checkCycle(); scp != nil { + return nil, vd.error(&kind.RefCycle{ + URL: s.Location, + KeywordLocation1: vd.scp.kwLoc(), + KeywordLocation2: scp.kwLoc(), + }) + } + + t := typeOf(v) + if t == invalidType { + return nil, vd.error(&kind.InvalidJsonValue{Value: v}) + } + + // type -- + if s.Types != nil && !s.Types.IsEmpty() { + matched := s.Types.contains(t) || (s.Types.contains(integerType) && t == numberType && isInteger(v)) + if !matched { + return nil, vd.error(&kind.Type{Got: t.String(), Want: s.Types.ToStrings()}) + } + } + + // const -- + if s.Const != nil { + ok, k := equals(v, *s.Const) + if k != nil { + return nil, vd.error(k) + } else if !ok { + return nil, vd.error(&kind.Const{Got: v, Want: *s.Const}) + } + } + + // enum -- + if s.Enum != nil { + matched := s.Enum.types.contains(typeOf(v)) + if matched { + matched = false + for _, item := range s.Enum.Values { + ok, k := equals(v, item) + if k != nil { + return nil, vd.error(k) + } else if ok { + matched = true + break + } + } + } + if !matched { + return nil, vd.error(&kind.Enum{Got: v, Want: s.Enum.Values}) + } + } + + // format -- + if s.Format != nil { + var err error + if s.Format.Name == "regex" && vd.regexpEngine != nil { + err = vd.regexpEngine.validate(v) + } else { + err = s.Format.Validate(v) + } + if err != nil { + return nil, vd.error(&kind.Format{Got: v, Want: s.Format.Name, Err: err}) + } + } + + // $ref -- + if s.Ref != nil { + err := vd.validateRef(s.Ref, "$ref") + if s.DraftVersion < 2019 { + return vd.uneval, err + } + if err != nil { + vd.addErr(err) + } + } + + // type specific validations -- + switch v := v.(type) { + case map[string]any: + vd.objValidate(v) + case []any: + vd.arrValidate(v) + case string: + vd.strValidate(v) + case json.Number, float32, float64, int, int8, int16, 
int32, int64, uint, uint8, uint16, uint32, uint64: + vd.numValidate(v) + } + + if len(vd.errors) == 0 || !vd.boolResult { + if s.DraftVersion >= 2019 { + vd.validateRefs() + } + vd.condValidate() + + for _, ext := range s.Extensions { + ext.Validate(&ValidatorContext{vd}, v) + } + + if s.DraftVersion >= 2019 { + vd.unevalValidate() + } + } + + switch len(vd.errors) { + case 0: + return vd.uneval, nil + case 1: + return nil, vd.errors[0] + default: + verr := vd.error(&kind.Group{}) + verr.Causes = vd.errors + return nil, verr + } +} + +func (vd *validator) objValidate(obj map[string]any) { + s := vd.sch + + // minProperties -- + if s.MinProperties != nil { + if len(obj) < *s.MinProperties { + vd.addError(&kind.MinProperties{Got: len(obj), Want: *s.MinProperties}) + } + } + + // maxProperties -- + if s.MaxProperties != nil { + if len(obj) > *s.MaxProperties { + vd.addError(&kind.MaxProperties{Got: len(obj), Want: *s.MaxProperties}) + } + } + + // required -- + if len(s.Required) > 0 { + if missing := vd.findMissing(obj, s.Required); missing != nil { + vd.addError(&kind.Required{Missing: missing}) + } + } + + if vd.boolResult && len(vd.errors) > 0 { + return + } + + // dependencies -- + for pname, dep := range s.Dependencies { + if _, ok := obj[pname]; ok { + switch dep := dep.(type) { + case []string: + if missing := vd.findMissing(obj, dep); missing != nil { + vd.addError(&kind.Dependency{Prop: pname, Missing: missing}) + } + case *Schema: + vd.addErr(vd.validateSelf(dep, "", false)) + } + } + } + + var additionalPros []string + for pname, pvalue := range obj { + if vd.boolResult && len(vd.errors) > 0 { + return + } + evaluated := false + + // properties -- + if sch, ok := s.Properties[pname]; ok { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + + // patternProperties -- + for regex, sch := range s.PatternProperties { + if regex.MatchString(pname) { + evaluated = true + vd.addErr(vd.validateVal(sch, pvalue, pname)) + } + } + + if !evaluated 
&& s.AdditionalProperties != nil { + evaluated = true + switch additional := s.AdditionalProperties.(type) { + case bool: + if !additional { + additionalPros = append(additionalPros, pname) + } + case *Schema: + vd.addErr(vd.validateVal(additional, pvalue, pname)) + } + } + + if evaluated { + delete(vd.uneval.props, pname) + } + } + if len(additionalPros) > 0 { + vd.addError(&kind.AdditionalProperties{Properties: additionalPros}) + } + + if s.DraftVersion == 4 { + return + } + + // propertyNames -- + if s.PropertyNames != nil { + for pname := range obj { + sch, meta, resources := s.PropertyNames, vd.meta, vd.resources + res := vd.metaResource(sch) + if res != nil { + meta = res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err := sch.validate(pname, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.PropertyNames.Location + verr.ErrorKind = &kind.PropertyNames{Property: pname} + vd.addErr(verr) + } + } + } + + if s.DraftVersion == 6 { + return + } + + // dependentSchemas -- + for pname, sch := range s.DependentSchemas { + if _, ok := obj[pname]; ok { + vd.addErr(vd.validateSelf(sch, "", false)) + } + } + + // dependentRequired -- + for pname, reqd := range s.DependentRequired { + if _, ok := obj[pname]; ok { + if missing := vd.findMissing(obj, reqd); missing != nil { + vd.addError(&kind.DependentRequired{Prop: pname, Missing: missing}) + } + } + } +} + +func (vd *validator) arrValidate(arr []any) { + s := vd.sch + + // minItems -- + if s.MinItems != nil { + if len(arr) < *s.MinItems { + vd.addError(&kind.MinItems{Got: len(arr), Want: *s.MinItems}) + } + } + + // maxItems -- + if s.MaxItems != nil { + if len(arr) > *s.MaxItems { + vd.addError(&kind.MaxItems{Got: len(arr), Want: *s.MaxItems}) + } + } + + // uniqueItems -- + if s.UniqueItems && len(arr) > 1 { + i, j, k := duplicates(arr) + if k != nil { + vd.addError(k) + } else if i != -1 { + 
vd.addError(&kind.UniqueItems{Duplicates: [2]int{i, j}}) + } + } + + if s.DraftVersion < 2020 { + evaluated := 0 + + // items -- + switch items := s.Items.(type) { + case *Schema: + for i, item := range arr { + vd.addErr(vd.validateVal(items, item, strconv.Itoa(i))) + } + evaluated = len(arr) + case []*Schema: + min := minInt(len(arr), len(items)) + for i, item := range arr[:min] { + vd.addErr(vd.validateVal(items[i], item, strconv.Itoa(i))) + } + evaluated = min + } + + // additionalItems -- + if s.AdditionalItems != nil { + switch additional := s.AdditionalItems.(type) { + case bool: + if !additional && evaluated != len(arr) { + vd.addError(&kind.AdditionalItems{Count: len(arr) - evaluated}) + } + case *Schema: + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(additional, item, strconv.Itoa(i))) + } + } + } + } else { + evaluated := minInt(len(s.PrefixItems), len(arr)) + + // prefixItems -- + for i, item := range arr[:evaluated] { + vd.addErr(vd.validateVal(s.PrefixItems[i], item, strconv.Itoa(i))) + } + + // items2020 -- + if s.Items2020 != nil { + for i, item := range arr[evaluated:] { + vd.addErr(vd.validateVal(s.Items2020, item, strconv.Itoa(i))) + } + } + } + + // contains -- + if s.Contains != nil { + var errors []*ValidationError + var matched []int + + for i, item := range arr { + if err := vd.validateVal(s.Contains, item, strconv.Itoa(i)); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = append(matched, i) + if s.DraftVersion >= 2020 { + delete(vd.uneval.items, i) + } + } + } + + // minContains -- + if s.MinContains != nil { + if len(matched) < *s.MinContains { + vd.addErrors(errors, &kind.MinContains{Got: matched, Want: *s.MinContains}) + } + } else if len(matched) == 0 { + vd.addErrors(errors, &kind.Contains{}) + } + + // maxContains -- + if s.MaxContains != nil { + if len(matched) > *s.MaxContains { + vd.addError(&kind.MaxContains{Got: matched, Want: *s.MaxContains}) + } + } + } +} + +func (vd 
*validator) strValidate(str string) { + s := vd.sch + + strLen := -1 + if s.MinLength != nil || s.MaxLength != nil { + strLen = utf8.RuneCount([]byte(str)) + } + + // minLength -- + if s.MinLength != nil { + if strLen < *s.MinLength { + vd.addError(&kind.MinLength{Got: strLen, Want: *s.MinLength}) + } + } + + // maxLength -- + if s.MaxLength != nil { + if strLen > *s.MaxLength { + vd.addError(&kind.MaxLength{Got: strLen, Want: *s.MaxLength}) + } + } + + // pattern -- + if s.Pattern != nil { + if !s.Pattern.MatchString(str) { + vd.addError(&kind.Pattern{Got: str, Want: s.Pattern.String()}) + } + } + + if s.DraftVersion == 6 { + return + } + + var err error + + // contentEncoding -- + decoded := []byte(str) + if s.ContentEncoding != nil { + decoded, err = s.ContentEncoding.Decode(str) + if err != nil { + decoded = nil + vd.addError(&kind.ContentEncoding{Want: s.ContentEncoding.Name, Err: err}) + } + } + + var deserialized *any + if decoded != nil && s.ContentMediaType != nil { + if s.ContentSchema == nil { + err = s.ContentMediaType.Validate(decoded) + } else { + var value any + value, err = s.ContentMediaType.UnmarshalJSON(decoded) + if err == nil { + deserialized = &value + } + } + if err != nil { + vd.addError(&kind.ContentMediaType{ + Got: decoded, + Want: s.ContentMediaType.Name, + Err: err, + }) + } + } + + if deserialized != nil && s.ContentSchema != nil { + sch, meta, resources := s.ContentSchema, vd.meta, vd.resources + res := vd.metaResource(sch) + if res != nil { + meta = res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + sch = meta + } + if err = sch.validate(*deserialized, vd.regexpEngine, meta, resources, vd.assertVocabs, vd.vocabularies); err != nil { + verr := err.(*ValidationError) + verr.SchemaURL = s.Location + verr.ErrorKind = &kind.ContentSchema{} + vd.addErr(verr) + } + } +} + +func (vd *validator) numValidate(v any) { + s := vd.sch + + var numVal *big.Rat + num := func() *big.Rat { + if numVal == nil { + numVal, _ = 
new(big.Rat).SetString(fmt.Sprintf("%v", v)) + } + return numVal + } + + // minimum -- + if s.Minimum != nil && num().Cmp(s.Minimum) < 0 { + vd.addError(&kind.Minimum{Got: num(), Want: s.Minimum}) + } + + // maximum -- + if s.Maximum != nil && num().Cmp(s.Maximum) > 0 { + vd.addError(&kind.Maximum{Got: num(), Want: s.Maximum}) + } + + // exclusiveMinimum + if s.ExclusiveMinimum != nil && num().Cmp(s.ExclusiveMinimum) <= 0 { + vd.addError(&kind.ExclusiveMinimum{Got: num(), Want: s.ExclusiveMinimum}) + } + + // exclusiveMaximum + if s.ExclusiveMaximum != nil && num().Cmp(s.ExclusiveMaximum) >= 0 { + vd.addError(&kind.ExclusiveMaximum{Got: num(), Want: s.ExclusiveMaximum}) + } + + // multipleOf + if s.MultipleOf != nil { + if q := new(big.Rat).Quo(num(), s.MultipleOf); !q.IsInt() { + vd.addError(&kind.MultipleOf{Got: num(), Want: s.MultipleOf}) + } + } +} + +func (vd *validator) condValidate() { + s := vd.sch + + // not -- + if s.Not != nil { + if vd.validateSelf(s.Not, "", true) == nil { + vd.addError(&kind.Not{}) + } + } + + // allOf -- + if len(s.AllOf) > 0 { + var errors []*ValidationError + for _, sch := range s.AllOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + if vd.boolResult { + break + } + } + } + if len(errors) != 0 { + vd.addErrors(errors, &kind.AllOf{}) + } + } + + // anyOf + if len(s.AnyOf) > 0 { + var matched bool + var errors []*ValidationError + for _, sch := range s.AnyOf { + if err := vd.validateSelf(sch, "", false); err != nil { + errors = append(errors, err.(*ValidationError)) + } else { + matched = true + // for uneval, all schemas must be evaluated + if vd.uneval.isEmpty() { + break + } + } + } + if !matched { + vd.addErrors(errors, &kind.AnyOf{}) + } + } + + // oneOf + if len(s.OneOf) > 0 { + var matched = -1 + var errors []*ValidationError + for i, sch := range s.OneOf { + if err := vd.validateSelf(sch, "", matched != -1); err != nil { + if matched == -1 { + errors = 
append(errors, err.(*ValidationError)) + } + } else { + if matched == -1 { + matched = i + } else { + vd.addError(&kind.OneOf{Subschemas: []int{matched, i}}) + break + } + } + } + if matched == -1 { + vd.addErrors(errors, &kind.OneOf{Subschemas: nil}) + } + } + + // if, then, else -- + if s.If != nil { + if vd.validateSelf(s.If, "", true) == nil { + if s.Then != nil { + vd.addErr(vd.validateSelf(s.Then, "", false)) + } + } else if s.Else != nil { + vd.addErr(vd.validateSelf(s.Else, "", false)) + } + } +} + +func (vd *validator) unevalValidate() { + s := vd.sch + + // unevaluatedProperties + if obj, ok := vd.v.(map[string]any); ok && s.UnevaluatedProperties != nil { + for pname := range vd.uneval.props { + if pvalue, ok := obj[pname]; ok { + vd.addErr(vd.validateVal(s.UnevaluatedProperties, pvalue, pname)) + } + } + vd.uneval.props = nil + } + + // unevaluatedItems + if arr, ok := vd.v.([]any); ok && s.UnevaluatedItems != nil { + for i := range vd.uneval.items { + vd.addErr(vd.validateVal(s.UnevaluatedItems, arr[i], strconv.Itoa(i))) + } + vd.uneval.items = nil + } +} + +// validation helpers -- + +func (vd *validator) validateSelf(sch *Schema, refKw string, boolResult bool) error { + scp := vd.scp.child(sch, refKw, vd.scp.vid) + uneval := unevalFrom(vd.v, sch, !vd.uneval.isEmpty()) + subvd := validator{ + v: vd.v, + vloc: vd.vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult || boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + uneval, err := subvd.validate() + if err == nil { + vd.uneval.merge(uneval) + } + return err +} + +func (vd *validator) validateVal(sch *Schema, v any, vtok string) error { + vloc := append(vd.vloc, vtok) + scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + 
errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) validateValue(sch *Schema, v any, vpath []string) error { + vloc := append(vd.vloc, vpath...) + scp := vd.scp.child(sch, "", vd.scp.vid+1) + uneval := unevalFrom(v, sch, false) + subvd := validator{ + v: v, + vloc: vloc, + sch: sch, + scp: scp, + uneval: uneval, + errors: nil, + boolResult: vd.boolResult, + regexpEngine: vd.regexpEngine, + meta: vd.meta, + resources: vd.resources, + assertVocabs: vd.assertVocabs, + vocabularies: vd.vocabularies, + } + subvd.handleMeta() + _, err := subvd.validate() + return err +} + +func (vd *validator) metaResource(sch *Schema) *resource { + if sch != vd.meta { + return nil + } + ptr := "" + for _, tok := range vd.instanceLocation() { + ptr += "/" + ptr += escape(tok) + } + return vd.resources[jsonPointer(ptr)] +} + +func (vd *validator) handleMeta() { + res := vd.metaResource(vd.sch) + if res == nil { + return + } + sch := res.dialect.getSchema(vd.assertVocabs, vd.vocabularies) + vd.meta = sch + vd.sch = sch +} + +// reference validation -- + +func (vd *validator) validateRef(sch *Schema, kw string) error { + err := vd.validateSelf(sch, kw, false) + if err != nil { + refErr := vd.error(&kind.Reference{Keyword: kw, URL: sch.Location}) + verr := err.(*ValidationError) + if _, ok := verr.ErrorKind.(*kind.Group); ok { + refErr.Causes = verr.Causes + } else { + refErr.Causes = append(refErr.Causes, verr) + } + return refErr + } + return nil +} + +func (vd *validator) resolveRecursiveAnchor(fallback *Schema) *Schema { + sch := fallback + scp := vd.scp + for scp != nil { + if scp.sch.resource.RecursiveAnchor { + sch = scp.sch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) resolveDynamicAnchor(name string, fallback *Schema) *Schema { + sch := 
fallback + scp := vd.scp + for scp != nil { + if dsch, ok := scp.sch.resource.dynamicAnchors[name]; ok { + sch = dsch + } + scp = scp.parent + } + return sch +} + +func (vd *validator) validateRefs() { + // $recursiveRef -- + if sch := vd.sch.RecursiveRef; sch != nil { + if sch.RecursiveAnchor { + sch = vd.resolveRecursiveAnchor(sch) + } + vd.addErr(vd.validateRef(sch, "$recursiveRef")) + } + + // $dynamicRef -- + if dref := vd.sch.DynamicRef; dref != nil { + sch := dref.Ref // initial target + if dref.Anchor != "" { + // $dynamicRef includes anchor + if sch.DynamicAnchor == dref.Anchor { + // initial target has matching $dynamicAnchor + sch = vd.resolveDynamicAnchor(dref.Anchor, sch) + } + } + vd.addErr(vd.validateRef(sch, "$dynamicRef")) + } +} + +// error helpers -- + +func (vd *validator) instanceLocation() []string { + return slices.Clone(vd.vloc) +} + +func (vd *validator) error(kind ErrorKind) *ValidationError { + if vd.boolResult { + return &ValidationError{} + } + return &ValidationError{ + SchemaURL: vd.sch.Location, + InstanceLocation: vd.instanceLocation(), + ErrorKind: kind, + Causes: nil, + } +} + +func (vd *validator) addErr(err error) { + if err != nil { + vd.errors = append(vd.errors, err.(*ValidationError)) + } +} + +func (vd *validator) addError(kind ErrorKind) { + vd.errors = append(vd.errors, vd.error(kind)) +} + +func (vd *validator) addErrors(errors []*ValidationError, kind ErrorKind) { + err := vd.error(kind) + err.Causes = errors + vd.errors = append(vd.errors, err) +} + +func (vd *validator) findMissing(obj map[string]any, reqd []string) []string { + var missing []string + for _, pname := range reqd { + if _, ok := obj[pname]; !ok { + if vd.boolResult { + return []string{} // non-nil + } + missing = append(missing, pname) + } + } + return missing +} + +// -- + +type scope struct { + sch *Schema + + // if empty, compute from self.sch and self.parent.sch. 
+ // not empty, only when there is a jump i.e, $ref, $XXXRef + refKeyword string + + // unique id of value being validated + // if two scopes validate same value, they will have + // same vid + vid int + + parent *scope +} + +func (sc *scope) child(sch *Schema, refKeyword string, vid int) *scope { + return &scope{sch, refKeyword, vid, sc} +} + +func (sc *scope) checkCycle() *scope { + scp := sc.parent + for scp != nil { + if scp.vid != sc.vid { + break + } + if scp.sch == sc.sch { + return scp + } + scp = scp.parent + } + return nil +} + +func (sc *scope) kwLoc() string { + var loc string + for sc.parent != nil { + if sc.refKeyword != "" { + loc = fmt.Sprintf("/%s%s", escape(sc.refKeyword), loc) + } else { + cur := sc.sch.Location + parent := sc.parent.sch.Location + loc = fmt.Sprintf("%s%s", cur[len(parent):], loc) + } + sc = sc.parent + } + return loc +} + +// -- + +type uneval struct { + props map[string]struct{} + items map[int]struct{} +} + +func unevalFrom(v any, sch *Schema, callerNeeds bool) *uneval { + uneval := &uneval{} + switch v := v.(type) { + case map[string]any: + if !sch.allPropsEvaluated && (callerNeeds || sch.UnevaluatedProperties != nil) { + uneval.props = map[string]struct{}{} + for k := range v { + uneval.props[k] = struct{}{} + } + } + case []any: + if !sch.allItemsEvaluated && (callerNeeds || sch.UnevaluatedItems != nil) && sch.numItemsEvaluated < len(v) { + uneval.items = map[int]struct{}{} + for i := sch.numItemsEvaluated; i < len(v); i++ { + uneval.items[i] = struct{}{} + } + } + } + return uneval +} + +func (ue *uneval) merge(other *uneval) { + for k := range ue.props { + if _, ok := other.props[k]; !ok { + delete(ue.props, k) + } + } + for i := range ue.items { + if _, ok := other.items[i]; !ok { + delete(ue.items, i) + } + } +} + +func (ue *uneval) isEmpty() bool { + return len(ue.props) == 0 && len(ue.items) == 0 +} + +// -- + +type ValidationError struct { + // absolute, dereferenced schema location. 
+ SchemaURL string + + // location of the JSON value within the instance being validated. + InstanceLocation []string + + // kind of error + ErrorKind ErrorKind + + // holds nested errors + Causes []*ValidationError +} + +type ErrorKind interface { + KeywordPath() []string + LocalizedString(*message.Printer) string +} diff --git a/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go b/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go new file mode 100644 index 00000000..18ace91e --- /dev/null +++ b/vendor/github.com/santhosh-tekuri/jsonschema/v6/vocab.go @@ -0,0 +1,106 @@ +package jsonschema + +// CompilerContext provides helpers for +// compiling a [Vocabulary]. +type CompilerContext struct { + c *objCompiler +} + +func (ctx *CompilerContext) Enqueue(schPath []string) *Schema { + ptr := ctx.c.up.ptr + for _, tok := range schPath { + ptr = ptr.append(tok) + } + return ctx.c.enqueuePtr(ptr) +} + +// Vocabulary defines a set of keywords, their syntax and +// their semantics. +type Vocabulary struct { + // URL identifier for this Vocabulary. + URL string + + // Schema that is used to validate the keywords that is introduced by this + // vocabulary. + Schema *Schema + + // Subschemas lists the possible locations of subschemas introduced by + // this vocabulary. + Subschemas []SchemaPath + + // Compile compiles the keywords(introduced by this vocabulary) in obj into [SchemaExt]. + // If obj does not contain any keywords introduced by this vocabulary, nil SchemaExt must + // be returned. + Compile func(ctx *CompilerContext, obj map[string]any) (SchemaExt, error) +} + +// -- + +// SchemaExt is compled form of vocabulary. +type SchemaExt interface { + // Validate validates v against and errors if any are reported + // to ctx. + Validate(ctx *ValidatorContext, v any) +} + +// ValidatorContext provides helpers for +// validating with [SchemaExt]. +type ValidatorContext struct { + vd *validator +} + +// Validate validates v with sch. 
vpath gives path of v from current context value. +func (ctx *ValidatorContext) Validate(sch *Schema, v any, vpath []string) error { + switch len(vpath) { + case 0: + return ctx.vd.validateSelf(sch, "", false) + case 1: + return ctx.vd.validateVal(sch, v, vpath[0]) + default: + return ctx.vd.validateValue(sch, v, vpath) + } +} + +// EvaluatedProp marks given property of current object as evaluated. +func (ctx *ValidatorContext) EvaluatedProp(pname string) { + delete(ctx.vd.uneval.props, pname) +} + +// EvaluatedItem marks items at given index of current array as evaluated. +func (ctx *ValidatorContext) EvaluatedItem(index int) { + delete(ctx.vd.uneval.items, index) +} + +// AddError reports validation-error of given kind. +func (ctx *ValidatorContext) AddError(k ErrorKind) { + ctx.vd.addError(k) +} + +// AddErrors reports validation-errors of given kind. +func (ctx *ValidatorContext) AddErrors(errors []*ValidationError, k ErrorKind) { + ctx.vd.addErrors(errors, k) +} + +// AddErr reports the given err. This is typically used to report +// the error created by subschema validation. +// +// NOTE that err must be of type *ValidationError. +func (ctx *ValidatorContext) AddErr(err error) { + ctx.vd.addErr(err) +} + +func (ctx *ValidatorContext) Equals(v1, v2 any) (bool, error) { + b, k := equals(v1, v2) + if k != nil { + return false, ctx.vd.error(k) + } + return b, nil +} + +func (ctx *ValidatorContext) Duplicates(arr []any) (int, int, error) { + i, j, k := duplicates(arr) + if k != nil { + return -1, -1, ctx.vd.error(k) + } + return i, j, nil +} diff --git a/vendor/github.com/xhit/go-str2duration/v2/LICENSE b/vendor/github.com/xhit/go-str2duration/v2/LICENSE new file mode 100644 index 00000000..ea5ea898 --- /dev/null +++ b/vendor/github.com/xhit/go-str2duration/v2/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2009 The Go Authors. All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/vendor/github.com/xhit/go-str2duration/v2/README.md b/vendor/github.com/xhit/go-str2duration/v2/README.md new file mode 100644 index 00000000..03263737 --- /dev/null +++ b/vendor/github.com/xhit/go-str2duration/v2/README.md @@ -0,0 +1,88 @@ +# Go String To Duration (go-str2duration) + +This package allows to get a time.Duration from a string. The string can be a string retorned for time.Duration or a similar string with weeks or days too!. 
+ +Go Report Card +go.dev + +## Download + +```bash +go get github.com/xhit/go-str2duration/v2 +``` + +## Features + +Go String To Duration supports this strings conversions to duration: +- All strings returned in time.Duration String. +- A string more readable like 1w2d6h3ns (1 week 2 days 6 hours and 3 nanoseconds). +- `µs` and `us` are microsecond. + +It's the same `time.ParseDuration` standard function in Go, but with days and week support. + +**Note**: a day is 24 hour. + +If you don't need days and weeks, use [`time.ParseDuration`](https://golang.org/pkg/time/#ParseDuration). + +## Usage + +```go +package main + +import ( + "fmt" + str2duration "github.com/xhit/go-str2duration/v2" + "os" + "time" +) + +func main() { + + for i, tt := range []struct { + dur string + expected time.Duration + }{ + //This times are returned with time.Duration string + {"1h", time.Duration(time.Hour)}, + {"1m", time.Duration(time.Minute)}, + {"1s", time.Duration(time.Second)}, + {"1ms", time.Duration(time.Millisecond)}, + {"1µs", time.Duration(time.Microsecond)}, + {"1us", time.Duration(time.Microsecond)}, + {"1ns", time.Duration(time.Nanosecond)}, + {"4.000000001s", time.Duration(4*time.Second + time.Nanosecond)}, + {"1h0m4.000000001s", time.Duration(time.Hour + 4*time.Second + time.Nanosecond)}, + {"1h1m0.01s", time.Duration(61*time.Minute + 10*time.Millisecond)}, + {"1h1m0.123456789s", time.Duration(61*time.Minute + 123456789*time.Nanosecond)}, + {"1.00002ms", time.Duration(time.Millisecond + 20*time.Nanosecond)}, + {"1.00000002s", time.Duration(time.Second + 20*time.Nanosecond)}, + {"693ns", time.Duration(693 * time.Nanosecond)}, + + //This times aren't returned with time.Duration string, but are easily readable and can be parsed too! 
+ {"1ms1ns", time.Duration(time.Millisecond + 1*time.Nanosecond)}, + {"1s20ns", time.Duration(time.Second + 20*time.Nanosecond)}, + {"60h8ms", time.Duration(60*time.Hour + 8*time.Millisecond)}, + {"96h63s", time.Duration(96*time.Hour + 63*time.Second)}, + + //And works with days and weeks! + {"2d3s96ns", time.Duration(48*time.Hour + 3*time.Second + 96*time.Nanosecond)}, + {"1w2d3s96ns", time.Duration(168*time.Hour + 48*time.Hour + 3*time.Second + 96*time.Nanosecond)}, + + {"10s1us693ns", time.Duration(10*time.Second + time.Microsecond + 693*time.Nanosecond)}, + + } { + durationFromString, err := str2duration.ParseDuration(tt.dur) + if err != nil { + panic(err) + + //Check if expected time is the time returned by the parser + } else if tt.expected != durationFromString { + fmt.Println(fmt.Sprintf("index %d -> in: %s returned: %s\tnot equal to %s", i, tt.dur, durationFromString.String(), tt.expected.String())) + }else{ + fmt.Println(fmt.Sprintf("index %d -> in: %s parsed succesfully", i, tt.dur)) + } + } +} +``` + +Also, you can convert to string the duration using `String(t time.Duration)` function. This support weeks and days and not return the ugly decimals from golang standard `t.String()` function. Units with 0 values aren't returned. For example: `1d1ms` means 1 day 1 millisecond. \ No newline at end of file diff --git a/vendor/github.com/xhit/go-str2duration/v2/str2duration.go b/vendor/github.com/xhit/go-str2duration/v2/str2duration.go new file mode 100644 index 00000000..51631db5 --- /dev/null +++ b/vendor/github.com/xhit/go-str2duration/v2/str2duration.go @@ -0,0 +1,331 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in https://raw.githubusercontent.com/golang/go/master/LICENSE + +package str2duration + +import ( + "errors" + "time" +) + +var unitMap = map[string]int64{ + "ns": int64(time.Nanosecond), + "us": int64(time.Microsecond), + "µs": int64(time.Microsecond), // U+00B5 = micro symbol + "μs": int64(time.Microsecond), // U+03BC = Greek letter mu + "ms": int64(time.Millisecond), + "s": int64(time.Second), + "m": int64(time.Minute), + "h": int64(time.Hour), + "d": int64(time.Hour) * 24, + "w": int64(time.Hour) * 168, +} + +// ParseDuration parses a duration string. +// A duration string is a possibly signed sequence of +// decimal numbers, each with optional fraction and a unit suffix, +// such as "300ms", "-1.5h" or "2h45m". +// Valid time units are "ns", "us" (or "µs"), "ms", "s", "m", "h", "d", "w". +func ParseDuration(s string) (time.Duration, error) { + // [-+]?([0-9]*(\.[0-9]*)?[a-z]+)+ + orig := s + var d int64 + neg := false + + // Consume [-+]? + if s != "" { + c := s[0] + if c == '-' || c == '+' { + neg = c == '-' + s = s[1:] + } + } + // Special case: if all that is left is "0", this is zero. + if s == "0" { + return 0, nil + } + if s == "" { + return 0, errors.New("time: invalid duration " + quote(orig)) + } + for s != "" { + var ( + v, f int64 // integers before, after decimal point + scale float64 = 1 // value = v + f/scale + ) + + var err error + + // The next character must be [0-9.] + if !(s[0] == '.' || '0' <= s[0] && s[0] <= '9') { + return 0, errors.New("time: invalid duration " + quote(orig)) + } + // Consume [0-9]* + pl := len(s) + v, s, err = leadingInt(s) + if err != nil { + return 0, errors.New("time: invalid duration " + quote(orig)) + } + pre := pl != len(s) // whether we consumed anything before a period + + // Consume (\.[0-9]*)? + post := false + if s != "" && s[0] == '.' 
{ + s = s[1:] + pl := len(s) + f, scale, s = leadingFraction(s) + post = pl != len(s) + } + if !pre && !post { + // no digits (e.g. ".s" or "-.s") + return 0, errors.New("time: invalid duration " + quote(orig)) + } + + // Consume unit. + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c == '.' || '0' <= c && c <= '9' { + break + } + } + if i == 0 { + return 0, errors.New("time: missing unit in duration " + quote(orig)) + } + u := s[:i] + s = s[i:] + unit, ok := unitMap[u] + if !ok { + return 0, errors.New("time: unknown unit " + quote(u) + " in duration " + quote(orig)) + } + if v > (1<<63-1)/unit { + // overflow + return 0, errors.New("time: invalid duration " + quote(orig)) + } + v *= unit + if f > 0 { + // float64 is needed to be nanosecond accurate for fractions of hours. + // v >= 0 && (f*unit/scale) <= 3.6e+12 (ns/h, h is the largest unit) + v += int64(float64(f) * (float64(unit) / scale)) + if v < 0 { + // overflow + return 0, errors.New("time: invalid duration " + quote(orig)) + } + } + d += v + if d < 0 { + // overflow + return 0, errors.New("time: invalid duration " + quote(orig)) + } + } + + if neg { + d = -d + } + return time.Duration(d), nil +} + +func quote(s string) string { + return "\"" + s + "\"" +} + +var errLeadingInt = errors.New("time: bad [0-9]*") // never printed + +// leadingInt consumes the leading [0-9]* from s. +func leadingInt(s string) (x int64, rem string, err error) { + i := 0 + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if x > (1<<63-1)/10 { + // overflow + return 0, "", errLeadingInt + } + x = x*10 + int64(c) - '0' + if x < 0 { + // overflow + return 0, "", errLeadingInt + } + } + return x, s[i:], nil +} + +// leadingFraction consumes the leading [0-9]* from s. +// It is used only for fractions, so does not return an error on overflow, +// it just stops accumulating precision. 
+func leadingFraction(s string) (x int64, scale float64, rem string) { + i := 0 + scale = 1 + overflow := false + for ; i < len(s); i++ { + c := s[i] + if c < '0' || c > '9' { + break + } + if overflow { + continue + } + if x > (1<<63-1)/10 { + // It's possible for overflow to give a positive number, so take care. + overflow = true + continue + } + y := x*10 + int64(c) - '0' + if y < 0 { + overflow = true + continue + } + x = y + scale *= 10 + } + return x, scale, s[i:] +} + +// String returns a string representing the duration in the form "1w4d2h3m5s". +// Units with 0 values aren't returned, for example: 1d1ms is 1 day 1 milliseconds +func String(d time.Duration) string { + if d == 0 { + return "0s" + } + + // Largest time is 15250w1d23h47m16s854ms775us807ns + var buf [32]byte + w := len(buf) + var sign string + + u := uint64(d) + neg := d < 0 + if neg { + u = -u + sign = "-" + } + + // u is nanoseconds (ns) + if u > 0 { + w-- + + if u%1000 > 0 { + buf[w] = 's' + w-- + buf[w] = 'n' + w = fmtInt(buf[:w], u%1000) + } else { + w++ + } + + u /= 1000 + + // u is now integer microseconds (us) + if u > 0 { + w-- + if u%1000 > 0 { + buf[w] = 's' + w-- + buf[w] = 'u' + w = fmtInt(buf[:w], u%1000) + } else { + w++ + } + u /= 1000 + + // u is now integer milliseconds (ms) + if u > 0 { + w-- + if u%1000 > 0 { + buf[w] = 's' + w-- + buf[w] = 'm' + w = fmtInt(buf[:w], u%1000) + } else { + w++ + } + u /= 1000 + + // u is now integer seconds (s) + if u > 0 { + w-- + if u%60 > 0 { + buf[w] = 's' + w = fmtInt(buf[:w], u%60) + } else { + w++ + } + u /= 60 + + // u is now integer minutes (m) + if u > 0 { + w-- + + if u%60 > 0 { + buf[w] = 'm' + w = fmtInt(buf[:w], u%60) + } else { + w++ + } + + u /= 60 + + // u is now integer hours (h) + if u > 0 { + w-- + + if u%24 > 0 { + buf[w] = 'h' + w = fmtInt(buf[:w], u%24) + } else { + w++ + } + + u /= 24 + + // u is now integer days (d) + if u > 0 { + w-- + + if u%7 > 0 { + buf[w] = 'd' + w = fmtInt(buf[:w], u%7) + } else { + w++ + } + + u 
/= 7 + + // u is now integer weeks (w) + if u > 0 { + w-- + buf[w] = 'w' + w = fmtInt(buf[:w], u) + } + + } + + } + } + } + } + } + + } + + return sign + string(buf[w:]) +} + +// fmtInt formats v into the tail of buf. +// It returns the index where the output begins. +func fmtInt(buf []byte, v uint64) int { + w := len(buf) + if v == 0 { + w-- + buf[w] = '0' + } else { + for v > 0 { + w-- + buf[w] = byte(v%10) + '0' + v /= 10 + } + } + return w +} diff --git a/vendor/go.yaml.in/yaml/v4/.gitignore b/vendor/go.yaml.in/yaml/v4/.gitignore new file mode 100644 index 00000000..bd0517d9 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/.gitignore @@ -0,0 +1,3 @@ +/.cache/ +/yts/testdata/ +/go-yaml diff --git a/vendor/go.yaml.in/yaml/v4/.golangci.yaml b/vendor/go.yaml.in/yaml/v4/.golangci.yaml new file mode 100644 index 00000000..67a8e89a --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/.golangci.yaml @@ -0,0 +1,46 @@ +version: '2' + +linters: + enable: + - dupword + - govet + - misspell + - nolintlint + - staticcheck + disable: + - errcheck + - ineffassign + - unused + settings: + dupword: + ignore: + - 'NULL' + - DOCUMENT-START + - BLOCK-END + misspell: + locale: US + nolintlint: + allow-unused: false + require-specific: true + require-explanation: true + govet: + enable-all: true + disable: + - fieldalignment + - shadow + staticcheck: + checks: + # enable all rules + - all + # disable some of them + - -QF1001 # De Morgan's laws is too opinionated. + - -ST1003 # The codebase has too many underscores in identifiers for now. + +formatters: + enable: + - gofumpt + +issues: + max-issues-per-linter: 0 + max-same-issues: 0 + uniq-by-line: false diff --git a/vendor/go.yaml.in/yaml/v4/.ls-lint.yaml b/vendor/go.yaml.in/yaml/v4/.ls-lint.yaml new file mode 100644 index 00000000..6a8a509c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/.ls-lint.yaml @@ -0,0 +1,16 @@ +# ls-lint configuration file. 
+# More information on the file format can be found on https://ls-lint.org/ +ls: + # root directory + .yml: exists:0 # .yml files + .*.yml: exists:0 # .yml dotfiles + + # any subdirectory, even dotfile directory + '**': + .yml: exists:0 # .yml files + .*.yml: exists:0 # .yml dotfiles + +ignore: +- .git # git folder +- .cache # cache folder +- yts/testdata # third-party folder diff --git a/vendor/go.yaml.in/yaml/v4/.typos.toml b/vendor/go.yaml.in/yaml/v4/.typos.toml new file mode 100644 index 00000000..06603cf8 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/.typos.toml @@ -0,0 +1,21 @@ +# this is the configuration file of typos (spell checker) +# https://github.com/crate-ci/typos + +[files] +# excluded file +extend-exclude = [ + "yts/testdata", # third-party test data +] + +# setting for Go files configuration +[type.go] +extend-ignore-re = [ + 'ba-dum-tss\W+', # this one can be found in test files + '"yYnNtTfFoO', # this one can be found in test files + 'ba\?r', # this one can be found in test files +] + +[type.go.extend-words] +# Here is a list of words we want to ignore in Go files +typ = "typ" # commonly used abbreviation for "type" in Go as "type" is a reserved identifier + diff --git a/vendor/go.yaml.in/yaml/v4/.yamllint.yaml b/vendor/go.yaml.in/yaml/v4/.yamllint.yaml new file mode 100644 index 00000000..3cd00707 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/.yamllint.yaml @@ -0,0 +1,19 @@ +extends: default + +ignore: /**/testdata/ # ignore testdata files that are not relevant for linting + +rules: + brackets: + min-spaces-inside: 0 + max-spaces-inside: 1 # allow us to use space in brackets + document-start: + present: false + indentation: + spaces: 2 + indent-sequences: false # make sure there is no indentation for sequences + truthy: + check-keys: false # there is a problem with the "on" key in GitHub Actions + quoted-strings: + check-keys: true # by default, only values are checked + quote-type: single + required: only-when-needed diff --git 
a/vendor/go.yaml.in/yaml/v4/CONTRIBUTING.md b/vendor/go.yaml.in/yaml/v4/CONTRIBUTING.md new file mode 100644 index 00000000..3031a8c9 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/CONTRIBUTING.md @@ -0,0 +1,160 @@ +Contributing to go-yaml +======================= + +Thank you for your interest in contributing to go-yaml! + +This document provides guidelines and instructions for contributing to this +project. + + +## Code of Conduct + +By participating in this project, you agree to follow our Code of Conduct. + +We expect all contributors to: +- Be respectful and inclusive +- Use welcoming and inclusive language +- Be collaborative and constructive +- Focus on what is best for both the Go and YAML communities + + +## How to Contribute + + +### Reporting Issues + +Before submitting an issue, please: +- Check if the issue already exists in our issue tracker +- Use a clear and descriptive title +- Provide detailed steps to reproduce the issue +- Include relevant code samples and error messages +- Specify your Go version and operating system +- Use the `go-yaml` CLI tool described below + + +### Using the `go-yaml` CLI Tool + +This tool can be used to inspect both the internal stages and final results of +YAML processing with the go-yaml library. +It should be used when reporting most bugs. + +The `go-yaml` CLI tool uses the `go-yaml.in/yaml/v4` library to decode and +encode YAML. +Decoding YAML is a multi-stage process that involves tokens, events, and nodes. +The `go-yaml` CLI tool lets you see all of these intermediate stages of the +decoding process. +This is crucial for understanding what go-yaml is doing internally. + +The `go-yaml` CLI tool can be built with the `make go-yaml` command or installed +with the `go install go.yaml.in/yaml/v4/cmd/go-yaml@latest` command. + +You can learn about all of its options with the `go-yaml -h` command. 
+ +Here is an example of using it on a small piece of YAML: + +```bash +./go-yaml -t <<< ' +foo: &a1 bar +*a1: baz +``` + + +### Coding Conventions + +- Follow standard Go coding conventions +- Use `make fmt` to format your code +- Write descriptive comments for non-obvious code +- Add tests for your work +- Keep line length to 80 characters +- Use meaningful variable and function names +- Start doc and comment sentences on a new line +- Test your changes with the `go-yaml` CLI tool when working on parsing logic + + +### Commit Conventions + +- No merge commits +- Commit subject line should: + - Start with a capital letter + - Not end with a period + - Be no more than 50 characters + + +### Pull Requests + +1. Fork the repository +1. Create a new branch for your changes +1. Make your changes following our coding conventions + - If you are not sure about the coding conventions, please ask + - Look at the existing code for examples +1. Write clear commit messages +1. Update tests and documentation +1. Submit a pull request + + +### Testing + +- Ensure all tests pass with `make test` +- Add new tests for new functionality +- Update existing tests when modifying functionality + + +## Development Process + +- Installing Go is not necessary. See "The Makefile" below. 
+- Fork and clone the repository +- Make your changes +- Run tests, linters and formatters + - `make test-all` + - `make lint` + - `make fmt` + - `make tidy` +- Submit a [Pull Request](https://github.com/yaml/go-yaml/pulls) + + +## The Makefile + +The repository contains a Makefile (`GNUmakefile`) that provides a number of +useful targets: + +- `make test` runs the tests +- `make test v=1 count=3` runs the tests with options +- `make test GO-VERSION=1.23.4` runs the tests with a specific Go version +- `make shell` opens a shell with the project's dependencies set up +- `make shell GO-VERSION=1.23.4` opens a shell with a specific Go version +- `make fmt` runs `golangci-lint fmt ./...` +- `make lint` runs `golangci-lint run` +- `make tidy` runs `go mod tidy` +- `make install` runs `go install` +- `make distclean` cleans the project completely + +The Makefile will install all requirements for any target, including Go itself. + + +## Getting Help + +If you need help, you can: +- Open an issue with your question +- Read through our documentation +- Join our [Slack channel](https://cloud-native.slack.com/archives/C08PPAT8PS7) + + +## We are a Work in Progress + +This project is very much a team effort. +We are just getting things rolling and trying to get the foundations in place. +There are lots of opinions and ideas about how to do things, even within the +core team. + +Once our process is more mature, we will likely change the rules here. +We'll make the new rules as a team. +For now, please stick to the rules as they are. + +This project is focused on serving the needs of both the Go and YAML +communities. +Sometimes those needs can be in conflict, but we'll try to find common ground. + + +## Thank You + +Thank you for contributing to go-yaml! 
diff --git a/vendor/go.yaml.in/yaml/v4/GNUmakefile b/vendor/go.yaml.in/yaml/v4/GNUmakefile new file mode 100644 index 00000000..d90d7ea3 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/GNUmakefile @@ -0,0 +1,136 @@ +# Auto-install https://github.com/makeplus/makes at specific commit: +MAKES := .cache/makes +MAKES-LOCAL := .cache/local +MAKES-COMMIT ?= 654f7c57ca30a2b08cb4aab8bb0c0d509510ad81 +$(shell [ -d $(MAKES) ] || ( \ + git clone -q https://github.com/makeplus/makes $(MAKES) && \ + git -C $(MAKES) reset -q --hard $(MAKES-COMMIT))) +ifneq ($(shell git -C $(MAKES) rev-parse HEAD), \ + $(shell git -C $(MAKES) rev-parse $(MAKES-COMMIT))) +$(error $(MAKES) is not at the correct commit: $(MAKES-COMMIT)) +endif +include $(MAKES)/init.mk +include $(MAKES)/clean.mk + +# Only auto-install go if no go exists or GO-VERSION specified: +ifeq (,$(shell command -v go)) +GO-VERSION ?= 1.24.0 +endif +GO-VERSION-NEEDED := $(GO-VERSION) + +# yaml-test-suite info: +YTS-URL ?= https://github.com/yaml/yaml-test-suite +YTS-TAG ?= data-2022-01-17 +YTS-DIR := yts/testdata/$(YTS-TAG) + +CLI-BINARY := go-yaml + +MAKES-NO-CLEAN := true +MAKES-CLEAN := $(CLI-BINARY) +MAKES-REALCLEAN := $(dir $(YTS-DIR)) + +# Setup and include go.mk and shell.mk: +GO-FILES := $(shell find -not \( -path ./.cache -prune \) -name '*.go' | sort) +GO-CMDS-SKIP := test fmt vet +ifndef GO-VERSION-NEEDED +GO-NO-DEP-GO := true +endif +GO-CMDS-RULES := true + +include $(MAKES)/go.mk + +# Set this from the `make` command to override: +GOLANGCI-LINT-VERSION ?= v2.6.0 +GOLANGCI-LINT-INSTALLER := \ + https://github.com/golangci/golangci-lint/raw/main/install.sh +GOLANGCI-LINT := $(LOCAL-BIN)/golangci-lint +GOLANGCI-LINT-VERSIONED := $(GOLANGCI-LINT)-$(GOLANGCI-LINT-VERSION) + +SHELL-DEPS += $(GOLANGCI-LINT-VERSIONED) + +ifdef GO-VERSION-NEEDED +GO-DEPS += $(GO) +else +SHELL-DEPS := $(filter-out $(GO),$(SHELL-DEPS)) +endif + +SHELL-NAME := makes go-yaml +include $(MAKES)/clean.mk +include $(MAKES)/shell.mk + +MAKES-CLEAN += 
$(dir $(YTS-DIR)) $(GOLANGCI-LINT) + +v ?= +count ?= 1 + + +# Test rules: +test: $(GO-DEPS) + go test$(if $v, -v) -vet=off . + +test-data: $(YTS-DIR) + +test-all: test test-yts-all + +test-yts: $(GO-DEPS) $(YTS-DIR) + go test$(if $v, -v) ./yts -count=$(count) + +test-yts-all: $(GO-DEPS) $(YTS-DIR) + @echo 'Testing yaml-test-suite' + @RUNALL=1 bash -c "$$yts_pass_fail" + +test-yts-fail: $(GO-DEPS) $(YTS-DIR) + @echo 'Testing yaml-test-suite failures' + @RUNFAILING=1 bash -c "$$yts_pass_fail" + +# Install golangci-lint for GitHub Actions: +golangci-lint-install: $(GOLANGCI-LINT) + +fmt: $(GOLANGCI-LINT-VERSIONED) + $< fmt ./... + +lint: $(GOLANGCI-LINT-VERSIONED) + $< run ./... + +cli: $(CLI-BINARY) + +$(CLI-BINARY): $(GO) + go build -o $@ ./cmd/$@ + +# Setup rules: +$(YTS-DIR): + git clone -q $(YTS-URL) $@ + git -C $@ checkout -q $(YTS-TAG) + +# Downloads golangci-lint binary and moves to versioned path +# (.cache/local/bin/golangci-lint-). +$(GOLANGCI-LINT-VERSIONED): $(GO-DEPS) + curl -sSfL $(GOLANGCI-LINT-INSTALLER) | \ + bash -s -- -b $(LOCAL-BIN) $(GOLANGCI-LINT-VERSION) + mv $(GOLANGCI-LINT) $@ + +# Moves golangci-lint- to golangci-lint for CI requirement +$(GOLANGCI-LINT): $(GOLANGCI-LINT-VERSIONED) + cp $< $@ + +define yts_pass_fail +( result=.cache/local/tmp/yts-test-results + go test ./yts -count=1 -v | + awk '/ --- (PASS|FAIL): / {print $$2, $$3}' > $$result + known_count=$$(grep -c '' yts/known-failing-tests) + pass_count=$$(grep -c '^PASS:' $$result) + fail_count=$$(grep -c '^FAIL:' $$result) + echo "PASS: $$pass_count" + echo "FAIL: $$fail_count (known: $$known_count)" + if [[ $$RUNFAILING ]] && [[ $$pass_count -gt 0 ]]; then + echo "ERROR: Found passing tests among expected failures:" + grep '^PASS:' $$result + exit 1 + fi + if [[ $$fail_count != "$$known_count" ]]; then + echo "ERROR: FAIL count differs from expected value of $$known_count" + exit 1 + fi +) +endef +export yts_pass_fail diff --git a/vendor/go.yaml.in/yaml/v4/LICENSE 
b/vendor/go.yaml.in/yaml/v4/LICENSE new file mode 100644 index 00000000..b0fa9711 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2025 - The go-yaml Project Contributors + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.yaml.in/yaml/v4/NOTICE b/vendor/go.yaml.in/yaml/v4/NOTICE new file mode 100644 index 00000000..2a8d4267 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/NOTICE @@ -0,0 +1,21 @@ +The following files were ported to Go from C files of libyaml, and thus are +still covered by their original MIT license, with the additional copyright +starting in 2011 when the project was ported over: + +- internal/libyaml/api.go +- internal/libyaml/emitter.go +- internal/libyaml/parser.go +- internal/libyaml/reader.go +- internal/libyaml/scanner.go +- internal/libyaml/writer.go +- internal/libyaml/yaml.go +- internal/libyaml/yamlprivate.go + +Copyright 2006-2011 - Kirill Simonov +https://opensource.org/license/mit + +All the remaining project files are covered by the Apache license: + +Copyright 2011-2019 - Canonical Ltd +Copyright 2025 - The go-yaml Project Contributors +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/vendor/go.yaml.in/yaml/v4/README.md b/vendor/go.yaml.in/yaml/v4/README.md new file mode 100644 index 00000000..d62640f5 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/README.md @@ -0,0 +1,227 @@ +go.yaml.in/yaml +=============== + +YAML Support for the Go Language + + +## Introduction + +The `yaml` package enables [Go](https://go.dev/) programs to comfortably encode +and decode [YAML](https://yaml.org/) values. + +It was originally developed within [Canonical](https://www.canonical.com) as +part of the [juju](https://juju.ubuntu.com) project, and is based on a pure Go +port of the well-known [libyaml](http://pyyaml.org/wiki/LibYAML) C library to +parse and generate YAML data quickly and reliably. + + +## Project Status + +This project started as a fork of the extremely popular [go-yaml]( +https://github.com/go-yaml/yaml/) +project, and is being maintained by the official [YAML organization]( +https://github.com/yaml/). 
+ +The YAML team took over ongoing maintenance and development of the project after +discussion with go-yaml's author, @niemeyer, following his decision to +[label the project repository as "unmaintained"]( +https://github.com/go-yaml/yaml/blob/944c86a7d2/README.md) in April 2025. + +We have put together a team of dedicated maintainers including representatives +of go-yaml's most important downstream projects. + +We will strive to earn the trust of the various go-yaml forks to switch back to +this repository as their upstream. + +Please [contact us](https://cloud-native.slack.com/archives/C08PPAT8PS7) if you +would like to contribute or be involved. + + +### Version Intentions + +Versions `v1`, `v2`, and `v3` will remain as **frozen legacy**. +They will receive **security-fixes only** so that existing consumers keep +working without breaking changes. + +All ongoing work, including new features and routine bug-fixes, will happen in +**`v4`**. +If you’re starting a new project or upgrading an existing one, please use the +`go.yaml.in/yaml/v4` import path. + + +## Compatibility + +The `yaml` package supports most of YAML 1.2, but preserves some behavior from +1.1 for backwards compatibility. + +Specifically, v3 of the `yaml` package: + +* Supports YAML 1.1 bools (`yes`/`no`, `on`/`off`) as long as they are being + decoded into a typed bool value. + Otherwise they behave as a string. + Booleans in YAML 1.2 are `true`/`false` only. +* Supports octals encoded and decoded as `0777` per YAML 1.1, rather than + `0o777` as specified in YAML 1.2, because most parsers still use the old + format. + Octals in the `0o777` format are supported though, so new files work. +* Does not support base-60 floats. + These are gone from YAML 1.2, and were actually never supported by this + package as it's clearly a poor choice. + + +## Installation and Usage + +The import path for the package is *go.yaml.in/yaml/v4*. 
+ +To install it, run: + +```bash +go get go.yaml.in/yaml/v4 +``` + + +## API Documentation + +See: + + +## API Stability + +The package API for yaml v3 will remain stable as described in [gopkg.in]( +https://gopkg.in). + + +## Example + +```go +package main + +import ( + "fmt" + "log" + + "go.yaml.in/yaml/v4" +) + +var data = ` +a: Easy! +b: + c: 2 + d: [3, 4] +` + +// Note: struct fields must be public in order for unmarshal to +// correctly populate the data. +type T struct { + A string + B struct { + RenamedC int `yaml:"c"` + D []int `yaml:",flow"` + } +} + +func main() { + t := T{} + + err := yaml.Unmarshal([]byte(data), &t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t:\n%v\n\n", t) + + d, err := yaml.Marshal(&t) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- t dump:\n%s\n\n", string(d)) + + m := make(map[any]any) + + err = yaml.Unmarshal([]byte(data), &m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m:\n%v\n\n", m) + + d, err = yaml.Marshal(&m) + if err != nil { + log.Fatalf("error: %v", err) + } + fmt.Printf("--- m dump:\n%s\n\n", string(d)) +} +``` + +This example will generate the following output: + +``` +--- t: +{Easy! {2 [3 4]}} + +--- t dump: +a: Easy! +b: + c: 2 + d: [3, 4] + + +--- m: +map[a:Easy! b:map[c:2 d:[3 4]]] + +--- m dump: +a: Easy! +b: + c: 2 + d: + - 3 + - 4 +``` + + +## Testing with `make` + +Running `make test` in this directory should just work. +You don't need to have `go` installed and even if you do the `GNUmakefile` will +ignore it and setup / cache its own version under `.cache/`. 
+ +The only things you need are: +* Linux or macOS +* `git` +* `bash` +* `curl` +* `make` + +Some `make` commands are: + +* `make test` +* `make test GO-VERSION=1.2.34` +* `make shell` Start a shell with the local `go` environment +* `make shell GO-VERSION=1.2.34` +* `make distclean` - Removes `.cache/` + + +## The `go-yaml` CLI Tool + +This repository includes a `go-yaml` CLI tool which can be used to understand +the internal stages and final results of YAML processing with the go-yaml +library. + +```bash +make go-yaml +./go-yaml --help +./go-yaml -t <<< ' +foo: &a1 bar +*a1: baz +' +``` + +You can also install it with: + +```bash +go install go.yaml.in/yaml/v4/cmd/go-yaml@latest +``` + + +## License + +The yaml package is licensed under the MIT and Apache License 2.0 licenses. +Please see the LICENSE file for details. diff --git a/vendor/go.yaml.in/yaml/v4/decode.go b/vendor/go.yaml.in/yaml/v4/decode.go new file mode 100644 index 00000000..b5423d1c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/decode.go @@ -0,0 +1,1042 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package yaml + +import ( + "encoding" + "encoding/base64" + "fmt" + "io" + "math" + "reflect" + "time" + + "go.yaml.in/yaml/v4/internal/libyaml" +) + +// ---------------------------------------------------------------------------- +// Parser, produces a node tree out of a libyaml event stream. 
+ +type parser struct { + parser libyaml.Parser + event libyaml.Event + doc *Node + anchors map[string]*Node + doneInit bool + textless bool +} + +func newParser(b []byte) *parser { + p := parser{ + parser: libyaml.NewParser(), + } + if len(b) == 0 { + b = []byte{'\n'} + } + p.parser.SetInputString(b) + return &p +} + +func newParserFromReader(r io.Reader) *parser { + p := parser{ + parser: libyaml.NewParser(), + } + p.parser.SetInputReader(r) + return &p +} + +func (p *parser) init() { + if p.doneInit { + return + } + p.anchors = make(map[string]*Node) + p.expect(libyaml.STREAM_START_EVENT) + p.doneInit = true +} + +func (p *parser) destroy() { + if p.event.Type != libyaml.NO_EVENT { + p.event.Delete() + } + p.parser.Delete() +} + +// expect consumes an event from the event stream and +// checks that it's of the expected type. +func (p *parser) expect(e libyaml.EventType) { + if p.event.Type == libyaml.NO_EVENT { + if !p.parser.Parse(&p.event) { + p.fail() + } + } + if p.event.Type == libyaml.STREAM_END_EVENT { + failf("attempted to go past the end of stream; corrupted value?") + } + if p.event.Type != e { + p.parser.Problem = fmt.Sprintf("expected %s event but got %s", e, p.event.Type) + p.fail() + } + p.event.Delete() + p.event.Type = libyaml.NO_EVENT +} + +// peek peeks at the next event in the event stream, +// puts the results into p.event and returns the event type. +func (p *parser) peek() libyaml.EventType { + if p.event.Type != libyaml.NO_EVENT { + return p.event.Type + } + // It's curious choice from the underlying API to generally return a + // positive result on success, but on this case return true in an error + // scenario. This was the source of bugs in the past (issue #666). 
+ if !p.parser.Parse(&p.event) || p.parser.ErrorType != libyaml.NO_ERROR { + p.fail() + } + return p.event.Type +} + +func (p *parser) fail() { + var line int + if p.parser.ContextMark.Line != 0 { + line = p.parser.ContextMark.Line + // Scanner errors don't iterate line before returning error + if p.parser.ErrorType == libyaml.SCANNER_ERROR { + line++ + } + } else if p.parser.ProblemMark.Line != 0 { + line = p.parser.ProblemMark.Line + // Scanner errors don't iterate line before returning error + if p.parser.ErrorType == libyaml.SCANNER_ERROR { + line++ + } + } + var column int + if p.parser.ContextMark.Column != 0 { + column = p.parser.ContextMark.Column + } else if p.parser.ProblemMark.Column != 0 { + column = p.parser.ProblemMark.Column + } + var msg string + if len(p.parser.Problem) > 0 { + msg = p.parser.Problem + } else { + msg = "unknown problem parsing YAML content" + } + fail(&ParserError{msg, line, column}) +} + +func (p *parser) anchor(n *Node, anchor []byte) { + if anchor != nil { + n.Anchor = string(anchor) + p.anchors[n.Anchor] = n + } +} + +func (p *parser) parse() *Node { + p.init() + switch p.peek() { + case libyaml.SCALAR_EVENT: + return p.scalar() + case libyaml.ALIAS_EVENT: + return p.alias() + case libyaml.MAPPING_START_EVENT: + return p.mapping() + case libyaml.SEQUENCE_START_EVENT: + return p.sequence() + case libyaml.DOCUMENT_START_EVENT: + return p.document() + case libyaml.STREAM_END_EVENT: + // Happens when attempting to decode an empty buffer. + return nil + case libyaml.TAIL_COMMENT_EVENT: + panic("internal error: unexpected tail comment event (please report)") + default: + panic("internal error: attempted to parse unknown event (please report): " + p.event.Type.String()) + } +} + +func (p *parser) node(kind Kind, defaultTag, tag, value string) *Node { + var style Style + if tag != "" && tag != "!" 
{ + tag = shortTag(tag) + style = TaggedStyle + } else if defaultTag != "" { + tag = defaultTag + } else if kind == ScalarNode { + tag, _ = resolve("", value) + } + n := &Node{ + Kind: kind, + Tag: tag, + Value: value, + Style: style, + } + if !p.textless { + n.Line = p.event.StartMark.Line + 1 + n.Column = p.event.StartMark.Column + 1 + n.HeadComment = string(p.event.HeadComment) + n.LineComment = string(p.event.LineComment) + n.FootComment = string(p.event.FootComment) + } + return n +} + +func (p *parser) parseChild(parent *Node) *Node { + child := p.parse() + parent.Content = append(parent.Content, child) + return child +} + +func (p *parser) document() *Node { + n := p.node(DocumentNode, "", "", "") + p.doc = n + p.expect(libyaml.DOCUMENT_START_EVENT) + p.parseChild(n) + if p.peek() == libyaml.DOCUMENT_END_EVENT { + n.FootComment = string(p.event.FootComment) + } + p.expect(libyaml.DOCUMENT_END_EVENT) + return n +} + +func (p *parser) alias() *Node { + n := p.node(AliasNode, "", "", string(p.event.Anchor)) + n.Alias = p.anchors[n.Value] + if n.Alias == nil { + failf("unknown anchor '%s' referenced", n.Value) + } + p.expect(libyaml.ALIAS_EVENT) + return n +} + +func (p *parser) scalar() *Node { + parsedStyle := p.event.ScalarStyle() + var nodeStyle Style + switch { + case parsedStyle&libyaml.DOUBLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = DoubleQuotedStyle + case parsedStyle&libyaml.SINGLE_QUOTED_SCALAR_STYLE != 0: + nodeStyle = SingleQuotedStyle + case parsedStyle&libyaml.LITERAL_SCALAR_STYLE != 0: + nodeStyle = LiteralStyle + case parsedStyle&libyaml.FOLDED_SCALAR_STYLE != 0: + nodeStyle = FoldedStyle + } + nodeValue := string(p.event.Value) + nodeTag := string(p.event.Tag) + var defaultTag string + if nodeStyle == 0 { + if nodeValue == "<<" { + defaultTag = mergeTag + } + } else { + defaultTag = strTag + } + n := p.node(ScalarNode, defaultTag, nodeTag, nodeValue) + n.Style |= nodeStyle + p.anchor(n, p.event.Anchor) + p.expect(libyaml.SCALAR_EVENT) + return n 
+} + +func (p *parser) sequence() *Node { + n := p.node(SequenceNode, seqTag, string(p.event.Tag), "") + if p.event.SequenceStyle()&libyaml.FLOW_SEQUENCE_STYLE != 0 { + n.Style |= FlowStyle + } + p.anchor(n, p.event.Anchor) + p.expect(libyaml.SEQUENCE_START_EVENT) + for p.peek() != libyaml.SEQUENCE_END_EVENT { + p.parseChild(n) + } + n.LineComment = string(p.event.LineComment) + n.FootComment = string(p.event.FootComment) + p.expect(libyaml.SEQUENCE_END_EVENT) + return n +} + +func (p *parser) mapping() *Node { + n := p.node(MappingNode, mapTag, string(p.event.Tag), "") + block := true + if p.event.MappingStyle()&libyaml.FLOW_MAPPING_STYLE != 0 { + block = false + n.Style |= FlowStyle + } + p.anchor(n, p.event.Anchor) + p.expect(libyaml.MAPPING_START_EVENT) + for p.peek() != libyaml.MAPPING_END_EVENT { + k := p.parseChild(n) + if block && k.FootComment != "" { + // Must be a foot comment for the prior value when being dedented. + if len(n.Content) > 2 { + n.Content[len(n.Content)-3].FootComment = k.FootComment + k.FootComment = "" + } + } + v := p.parseChild(n) + if k.FootComment == "" && v.FootComment != "" { + k.FootComment = v.FootComment + v.FootComment = "" + } + if p.peek() == libyaml.TAIL_COMMENT_EVENT { + if k.FootComment == "" { + k.FootComment = string(p.event.FootComment) + } + p.expect(libyaml.TAIL_COMMENT_EVENT) + } + } + n.LineComment = string(p.event.LineComment) + n.FootComment = string(p.event.FootComment) + if n.Style&FlowStyle == 0 && n.FootComment != "" && len(n.Content) > 1 { + n.Content[len(n.Content)-2].FootComment = n.FootComment + n.FootComment = "" + } + p.expect(libyaml.MAPPING_END_EVENT) + return n +} + +// ---------------------------------------------------------------------------- +// Decoder, unmarshals a node into a provided value. 
+ +type decoder struct { + doc *Node + aliases map[*Node]bool + terrors []*UnmarshalError + + stringMapType reflect.Type + generalMapType reflect.Type + + knownFields bool + uniqueKeys bool + decodeCount int + aliasCount int + aliasDepth int + + mergedFields map[any]bool +} + +var ( + nodeType = reflect.TypeOf(Node{}) + durationType = reflect.TypeOf(time.Duration(0)) + stringMapType = reflect.TypeOf(map[string]any{}) + generalMapType = reflect.TypeOf(map[any]any{}) + ifaceType = generalMapType.Elem() +) + +func newDecoder() *decoder { + d := &decoder{ + stringMapType: stringMapType, + generalMapType: generalMapType, + uniqueKeys: true, + } + d.aliases = make(map[*Node]bool) + return d +} + +func (d *decoder) terror(n *Node, tag string, out reflect.Value) { + if n.Tag != "" { + tag = n.Tag + } + value := n.Value + if tag != seqTag && tag != mapTag { + if len(value) > 10 { + value = " `" + value[:7] + "...`" + } else { + value = " `" + value + "`" + } + } + d.terrors = append(d.terrors, &UnmarshalError{ + Err: fmt.Errorf("cannot unmarshal %s%s into %s", shortTag(tag), value, out.Type()), + Line: n.Line, + Column: n.Column, + }) +} + +func (d *decoder) callUnmarshaler(n *Node, u Unmarshaler) (good bool) { + err := u.UnmarshalYAML(n) + switch e := err.(type) { + case nil: + return true + case *TypeError: + d.terrors = append(d.terrors, e.Errors...) 
+ return false + default: + d.terrors = append(d.terrors, &UnmarshalError{ + Err: err, + Line: n.Line, + Column: n.Column, + }) + return false + } +} + +func (d *decoder) callObsoleteUnmarshaler(n *Node, u obsoleteUnmarshaler) (good bool) { + terrlen := len(d.terrors) + err := u.UnmarshalYAML(func(v any) (err error) { + defer handleErr(&err) + d.unmarshal(n, reflect.ValueOf(v)) + if len(d.terrors) > terrlen { + issues := d.terrors[terrlen:] + d.terrors = d.terrors[:terrlen] + return &TypeError{issues} + } + return nil + }) + switch e := err.(type) { + case nil: + return true + case *TypeError: + d.terrors = append(d.terrors, e.Errors...) + return false + default: + d.terrors = append(d.terrors, &UnmarshalError{ + Err: err, + Line: n.Line, + Column: n.Column, + }) + return false + } +} + +// d.prepare initializes and dereferences pointers and calls UnmarshalYAML +// if a value is found to implement it. +// It returns the initialized and dereferenced out value, whether +// unmarshalling was already done by UnmarshalYAML, and if so whether +// its types unmarshalled appropriately. +// +// If n holds a null value, prepare returns before doing anything. 
+func (d *decoder) prepare(n *Node, out reflect.Value) (newout reflect.Value, unmarshaled, good bool) { + if n.ShortTag() == nullTag { + return out, false, false + } + again := true + for again { + again = false + if out.Kind() == reflect.Pointer { + if out.IsNil() { + out.Set(reflect.New(out.Type().Elem())) + } + out = out.Elem() + again = true + } + if out.CanAddr() { + outi := out.Addr().Interface() + if u, ok := outi.(Unmarshaler); ok { + good = d.callUnmarshaler(n, u) + return out, true, good + } + if u, ok := outi.(obsoleteUnmarshaler); ok { + good = d.callObsoleteUnmarshaler(n, u) + return out, true, good + } + } + } + return out, false, false +} + +func (d *decoder) fieldByIndex(n *Node, v reflect.Value, index []int) (field reflect.Value) { + if n.ShortTag() == nullTag { + return reflect.Value{} + } + for _, num := range index { + for { + if v.Kind() == reflect.Pointer { + if v.IsNil() { + v.Set(reflect.New(v.Type().Elem())) + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +const ( + // 400,000 decode operations is ~500kb of dense object declarations, or + // ~5kb of dense object declarations with 10000% alias expansion + alias_ratio_range_low = 400000 + + // 4,000,000 decode operations is ~5MB of dense object declarations, or + // ~4.5MB of dense object declarations with 10% alias expansion + alias_ratio_range_high = 4000000 + + // alias_ratio_range is the range over which we scale allowed alias ratios + alias_ratio_range = float64(alias_ratio_range_high - alias_ratio_range_low) +) + +func allowedAliasRatio(decodeCount int) float64 { + switch { + case decodeCount <= alias_ratio_range_low: + // allow 99% to come from alias expansion for small-to-medium documents + return 0.99 + case decodeCount >= alias_ratio_range_high: + // allow 10% to come from alias expansion for very large documents + return 0.10 + default: + // scale smoothly from 99% down to 10% over the range. 
+ // this maps to 396,000 - 400,000 allowed alias-driven decodes over the range. + // 400,000 decode operations is ~100MB of allocations in worst-case scenarios (single-item maps). + return 0.99 - 0.89*(float64(decodeCount-alias_ratio_range_low)/alias_ratio_range) + } +} + +func (d *decoder) unmarshal(n *Node, out reflect.Value) (good bool) { + d.decodeCount++ + if d.aliasDepth > 0 { + d.aliasCount++ + } + if d.aliasCount > 100 && d.decodeCount > 1000 && float64(d.aliasCount)/float64(d.decodeCount) > allowedAliasRatio(d.decodeCount) { + failf("document contains excessive aliasing") + } + if out.Type() == nodeType { + out.Set(reflect.ValueOf(n).Elem()) + return true + } + switch n.Kind { + case DocumentNode: + return d.document(n, out) + case AliasNode: + return d.alias(n, out) + } + out, unmarshaled, good := d.prepare(n, out) + if unmarshaled { + return good + } + switch n.Kind { + case ScalarNode: + good = d.scalar(n, out) + case MappingNode: + good = d.mapping(n, out) + case SequenceNode: + good = d.sequence(n, out) + case 0: + if n.IsZero() { + return d.null(out) + } + fallthrough + default: + failf("cannot decode node with unknown kind %d", n.Kind) + } + return good +} + +func (d *decoder) document(n *Node, out reflect.Value) (good bool) { + if len(n.Content) == 1 { + d.doc = n + d.unmarshal(n.Content[0], out) + return true + } + return false +} + +func (d *decoder) alias(n *Node, out reflect.Value) (good bool) { + if d.aliases[n] { + // TODO this could actually be allowed in some circumstances. 
+ failf("anchor '%s' value contains itself", n.Value) + } + d.aliases[n] = true + d.aliasDepth++ + good = d.unmarshal(n.Alias, out) + d.aliasDepth-- + delete(d.aliases, n) + return good +} + +func (d *decoder) null(out reflect.Value) bool { + if out.CanAddr() { + switch out.Kind() { + case reflect.Interface, reflect.Pointer, reflect.Map, reflect.Slice: + out.Set(reflect.Zero(out.Type())) + return true + } + } + return false +} + +func (d *decoder) scalar(n *Node, out reflect.Value) bool { + var tag string + var resolved any + if n.indicatedString() { + tag = strTag + resolved = n.Value + } else { + tag, resolved = resolve(n.Tag, n.Value) + if tag == binaryTag { + data, err := base64.StdEncoding.DecodeString(resolved.(string)) + if err != nil { + failf("!!binary value contains invalid base64 data") + } + resolved = string(data) + } + } + if resolved == nil { + return d.null(out) + } + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + // We've resolved to exactly the type we want, so use that. + out.Set(resolvedv) + return true + } + // Perhaps we can use the value as a TextUnmarshaler to + // set its value. + if out.CanAddr() { + u, ok := out.Addr().Interface().(encoding.TextUnmarshaler) + if ok { + var text []byte + if tag == binaryTag { + text = []byte(resolved.(string)) + } else { + // We let any value be unmarshaled into TextUnmarshaler. + // That might be more lax than we'd like, but the + // TextUnmarshaler itself should bowl out any dubious values. 
+ text = []byte(n.Value) + } + err := u.UnmarshalText(text) + if err != nil { + d.terrors = append(d.terrors, &UnmarshalError{ + Err: err, + Line: n.Line, + Column: n.Column, + }) + return false + } + return true + } + } + switch out.Kind() { + case reflect.String: + if tag == binaryTag { + out.SetString(resolved.(string)) + return true + } + out.SetString(n.Value) + return true + case reflect.Interface: + out.Set(reflect.ValueOf(resolved)) + return true + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + // This used to work in v2, but it's very unfriendly. + isDuration := out.Type() == durationType + + switch resolved := resolved.(type) { + case int: + if !isDuration && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case int64: + if !isDuration && !out.OverflowInt(resolved) { + out.SetInt(resolved) + return true + } + case uint64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case float64: + if !isDuration && resolved <= math.MaxInt64 && !out.OverflowInt(int64(resolved)) { + out.SetInt(int64(resolved)) + return true + } + case string: + if out.Type() == durationType { + d, err := time.ParseDuration(resolved) + if err == nil { + out.SetInt(int64(d)) + return true + } + } + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + switch resolved := resolved.(type) { + case int: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case int64: + if resolved >= 0 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case uint64: + if !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + case float64: + if resolved <= math.MaxUint64 && !out.OverflowUint(uint64(resolved)) { + out.SetUint(uint64(resolved)) + return true + } + } + case 
reflect.Bool: + switch resolved := resolved.(type) { + case bool: + out.SetBool(resolved) + return true + case string: + // This offers some compatibility with the 1.1 spec (https://yaml.org/type/bool.html). + // It only works if explicitly attempting to unmarshal into a typed bool value. + switch resolved { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON": + out.SetBool(true) + return true + case "n", "N", "no", "No", "NO", "off", "Off", "OFF": + out.SetBool(false) + return true + } + } + case reflect.Float32, reflect.Float64: + switch resolved := resolved.(type) { + case int: + out.SetFloat(float64(resolved)) + return true + case int64: + out.SetFloat(float64(resolved)) + return true + case uint64: + out.SetFloat(float64(resolved)) + return true + case float64: + out.SetFloat(resolved) + return true + } + case reflect.Struct: + if resolvedv := reflect.ValueOf(resolved); out.Type() == resolvedv.Type() { + out.Set(resolvedv) + return true + } + case reflect.Pointer: + panic("yaml internal error: please report the issue") + } + d.terror(n, tag, out) + return false +} + +func settableValueOf(i any) reflect.Value { + v := reflect.ValueOf(i) + sv := reflect.New(v.Type()).Elem() + sv.Set(v) + return sv +} + +func (d *decoder) sequence(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + + var iface reflect.Value + switch out.Kind() { + case reflect.Slice: + out.Set(reflect.MakeSlice(out.Type(), l, l)) + case reflect.Array: + if l != out.Len() { + failf("invalid array: want %d elements but got %d", out.Len(), l) + } + case reflect.Interface: + // No type hints. Will have to use a generic sequence. 
+ iface = out + out = settableValueOf(make([]any, l)) + default: + d.terror(n, seqTag, out) + return false + } + et := out.Type().Elem() + + j := 0 + for i := 0; i < l; i++ { + e := reflect.New(et).Elem() + if ok := d.unmarshal(n.Content[i], e); ok { + out.Index(j).Set(e) + j++ + } + } + if out.Kind() != reflect.Array { + out.Set(out.Slice(0, j)) + } + if iface.IsValid() { + iface.Set(out) + } + return true +} + +func (d *decoder) mapping(n *Node, out reflect.Value) (good bool) { + l := len(n.Content) + if d.uniqueKeys { + nerrs := len(d.terrors) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + for j := i + 2; j < l; j += 2 { + nj := n.Content[j] + if ni.Kind == nj.Kind && ni.Value == nj.Value { + d.terrors = append(d.terrors, &UnmarshalError{ + Err: fmt.Errorf("mapping key %#v already defined at line %d", nj.Value, ni.Line), + Line: nj.Line, + Column: nj.Column, + }) + } + } + } + if len(d.terrors) > nerrs { + return false + } + } + switch out.Kind() { + case reflect.Struct: + return d.mappingStruct(n, out) + case reflect.Map: + // okay + case reflect.Interface: + iface := out + if isStringMap(n) { + out = reflect.MakeMap(d.stringMapType) + } else { + out = reflect.MakeMap(d.generalMapType) + } + iface.Set(out) + default: + d.terror(n, mapTag, out) + return false + } + + outt := out.Type() + kt := outt.Key() + et := outt.Elem() + + stringMapType := d.stringMapType + generalMapType := d.generalMapType + if outt.Elem() == ifaceType { + if outt.Key().Kind() == reflect.String { + d.stringMapType = outt + } else if outt.Key() == ifaceType { + d.generalMapType = outt + } + } + + mergedFields := d.mergedFields + d.mergedFields = nil + + var mergeNode *Node + + mapIsNew := false + if out.IsNil() { + out.Set(reflect.MakeMap(outt)) + mapIsNew = true + } + for i := 0; i < l; i += 2 { + if isMerge(n.Content[i]) { + mergeNode = n.Content[i+1] + continue + } + k := reflect.New(kt).Elem() + if d.unmarshal(n.Content[i], k) { + if mergedFields != nil { + ki := k.Interface() + 
if d.getPossiblyUnhashableKey(mergedFields, ki) { + continue + } + d.setPossiblyUnhashableKey(mergedFields, ki, true) + } + kkind := k.Kind() + if kkind == reflect.Interface { + kkind = k.Elem().Kind() + } + if kkind == reflect.Map || kkind == reflect.Slice { + failf("cannot use '%#v' as a map key; try decoding into yaml.Node", k.Interface()) + } + e := reflect.New(et).Elem() + if d.unmarshal(n.Content[i+1], e) || n.Content[i+1].ShortTag() == nullTag && (mapIsNew || !out.MapIndex(k).IsValid()) { + out.SetMapIndex(k, e) + } + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + + d.stringMapType = stringMapType + d.generalMapType = generalMapType + return true +} + +func isStringMap(n *Node) bool { + if n.Kind != MappingNode { + return false + } + l := len(n.Content) + for i := 0; i < l; i += 2 { + shortTag := n.Content[i].ShortTag() + if shortTag != strTag && shortTag != mergeTag { + return false + } + } + return true +} + +func (d *decoder) mappingStruct(n *Node, out reflect.Value) (good bool) { + sinfo, err := getStructInfo(out.Type()) + if err != nil { + panic(err) + } + + var inlineMap reflect.Value + var elemType reflect.Type + if sinfo.InlineMap != -1 { + inlineMap = out.Field(sinfo.InlineMap) + elemType = inlineMap.Type().Elem() + } + + for _, index := range sinfo.InlineUnmarshalers { + field := d.fieldByIndex(n, out, index) + d.prepare(n, field) + } + + mergedFields := d.mergedFields + d.mergedFields = nil + var mergeNode *Node + var doneFields []bool + if d.uniqueKeys { + doneFields = make([]bool, len(sinfo.FieldsList)) + } + name := settableValueOf("") + l := len(n.Content) + for i := 0; i < l; i += 2 { + ni := n.Content[i] + if isMerge(ni) { + mergeNode = n.Content[i+1] + continue + } + if !d.unmarshal(ni, name) { + continue + } + sname := name.String() + if mergedFields != nil { + if mergedFields[sname] { + continue + } + mergedFields[sname] = true + } + if info, ok := sinfo.FieldsMap[sname]; ok { + if 
d.uniqueKeys { + if doneFields[info.Id] { + d.terrors = append(d.terrors, &UnmarshalError{ + Err: fmt.Errorf("field %s already set in type %s", name.String(), out.Type()), + Line: ni.Line, + Column: ni.Column, + }) + continue + } + doneFields[info.Id] = true + } + var field reflect.Value + if info.Inline == nil { + field = out.Field(info.Num) + } else { + field = d.fieldByIndex(n, out, info.Inline) + } + d.unmarshal(n.Content[i+1], field) + } else if sinfo.InlineMap != -1 { + if inlineMap.IsNil() { + inlineMap.Set(reflect.MakeMap(inlineMap.Type())) + } + value := reflect.New(elemType).Elem() + d.unmarshal(n.Content[i+1], value) + inlineMap.SetMapIndex(name, value) + } else if d.knownFields { + d.terrors = append(d.terrors, &UnmarshalError{ + Err: fmt.Errorf("field %s not found in type %s", name.String(), out.Type()), + Line: ni.Line, + Column: ni.Column, + }) + } + } + + d.mergedFields = mergedFields + if mergeNode != nil { + d.merge(n, mergeNode, out) + } + return true +} + +func failWantMap() { + failf("map merge requires map or sequence of maps as the value") +} + +func (d *decoder) setPossiblyUnhashableKey(m map[any]bool, key any, value bool) { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + m[key] = value +} + +func (d *decoder) getPossiblyUnhashableKey(m map[any]bool, key any) bool { + defer func() { + if err := recover(); err != nil { + failf("%v", err) + } + }() + return m[key] +} + +func (d *decoder) merge(parent *Node, merge *Node, out reflect.Value) { + mergedFields := d.mergedFields + if mergedFields == nil { + d.mergedFields = make(map[any]bool) + for i := 0; i < len(parent.Content); i += 2 { + k := reflect.New(ifaceType).Elem() + if d.unmarshal(parent.Content[i], k) { + d.setPossiblyUnhashableKey(d.mergedFields, k.Interface(), true) + } + } + } + + switch merge.Kind { + case MappingNode: + d.unmarshal(merge, out) + case AliasNode: + if merge.Alias != nil && merge.Alias.Kind != MappingNode { + failWantMap() + } + 
d.unmarshal(merge, out) + case SequenceNode: + for i := 0; i < len(merge.Content); i++ { + ni := merge.Content[i] + if ni.Kind == AliasNode { + if ni.Alias != nil && ni.Alias.Kind != MappingNode { + failWantMap() + } + } else if ni.Kind != MappingNode { + failWantMap() + } + d.unmarshal(ni, out) + } + default: + failWantMap() + } + + d.mergedFields = mergedFields +} + +func isMerge(n *Node) bool { + return n.Kind == ScalarNode && n.Value == "<<" && (n.Tag == "" || n.Tag == "!" || shortTag(n.Tag) == mergeTag) +} diff --git a/vendor/go.yaml.in/yaml/v4/encode.go b/vendor/go.yaml.in/yaml/v4/encode.go new file mode 100644 index 00000000..ce66dee9 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/encode.go @@ -0,0 +1,592 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding" + "fmt" + "io" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "time" + "unicode/utf8" + + "go.yaml.in/yaml/v4/internal/libyaml" +) + +type encoder struct { + emitter libyaml.Emitter + event libyaml.Event + out []byte + flow bool + indent int + doneInit bool +} + +func newEncoder() *encoder { + e := &encoder{ + emitter: libyaml.NewEmitter(), + } + e.emitter.SetOutputString(&e.out) + e.emitter.SetUnicode(true) + return e +} + +func newEncoderWithWriter(w io.Writer) *encoder { + e := &encoder{ + emitter: libyaml.NewEmitter(), + } + e.emitter.SetOutputWriter(w) + e.emitter.SetUnicode(true) + return e +} + +func (e *encoder) init() { + if e.doneInit { + return + } + if e.indent == 0 { + e.indent = 4 + } + e.emitter.BestIndent = e.indent + e.event = libyaml.NewStreamStartEvent(libyaml.UTF8_ENCODING) + e.emit() + e.doneInit = true +} + +func (e *encoder) finish() { + e.emitter.OpenEnded = false + e.event = libyaml.NewStreamEndEvent() + e.emit() +} + +func (e *encoder) destroy() { + e.emitter.Delete() +} + +func (e *encoder) emit() { + // This will internally delete the e.event value. 
+ e.must(e.emitter.Emit(&e.event)) +} + +func (e *encoder) must(err error) { + if err != nil { + msg := err.Error() + if msg == "" { + msg = "unknown problem generating YAML content" + } + failf("%s", msg) + } +} + +func (e *encoder) marshalDoc(tag string, in reflect.Value) { + e.init() + var node *Node + if in.IsValid() { + node, _ = in.Interface().(*Node) + } + if node != nil && node.Kind == DocumentNode { + e.nodev(in) + } else { + e.event = libyaml.NewDocumentStartEvent(nil, nil, true) + e.emit() + e.marshal(tag, in) + e.event = libyaml.NewDocumentEndEvent(true) + e.emit() + } +} + +func (e *encoder) marshal(tag string, in reflect.Value) { + tag = shortTag(tag) + if !in.IsValid() || in.Kind() == reflect.Pointer && in.IsNil() { + e.nilv() + return + } + iface := in.Interface() + switch value := iface.(type) { + case *Node: + e.nodev(in) + return + case Node: + if !in.CanAddr() { + n := reflect.New(in.Type()).Elem() + n.Set(in) + in = n + } + e.nodev(in.Addr()) + return + case time.Time: + e.timev(tag, in) + return + case *time.Time: + e.timev(tag, in.Elem()) + return + case time.Duration: + e.stringv(tag, reflect.ValueOf(value.String())) + return + case Marshaler: + v, err := value.MarshalYAML() + if err != nil { + fail(err) + } + if v == nil { + e.nilv() + return + } + e.marshal(tag, reflect.ValueOf(v)) + return + case encoding.TextMarshaler: + text, err := value.MarshalText() + if err != nil { + fail(err) + } + in = reflect.ValueOf(string(text)) + case nil: + e.nilv() + return + } + switch in.Kind() { + case reflect.Interface: + e.marshal(tag, in.Elem()) + case reflect.Map: + e.mapv(tag, in) + case reflect.Pointer: + e.marshal(tag, in.Elem()) + case reflect.Struct: + e.structv(tag, in) + case reflect.Slice, reflect.Array: + e.slicev(tag, in) + case reflect.String: + e.stringv(tag, in) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + e.intv(tag, in) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, 
reflect.Uint64, reflect.Uintptr: + e.uintv(tag, in) + case reflect.Float32, reflect.Float64: + e.floatv(tag, in) + case reflect.Bool: + e.boolv(tag, in) + default: + panic("cannot marshal type: " + in.Type().String()) + } +} + +func (e *encoder) mapv(tag string, in reflect.Value) { + e.mappingv(tag, func() { + keys := keyList(in.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + e.marshal("", k) + e.marshal("", in.MapIndex(k)) + } + }) +} + +func (e *encoder) fieldByIndex(v reflect.Value, index []int) (field reflect.Value) { + for _, num := range index { + for { + if v.Kind() == reflect.Pointer { + if v.IsNil() { + return reflect.Value{} + } + v = v.Elem() + continue + } + break + } + v = v.Field(num) + } + return v +} + +func (e *encoder) structv(tag string, in reflect.Value) { + sinfo, err := getStructInfo(in.Type()) + if err != nil { + panic(err) + } + e.mappingv(tag, func() { + for _, info := range sinfo.FieldsList { + var value reflect.Value + if info.Inline == nil { + value = in.Field(info.Num) + } else { + value = e.fieldByIndex(in, info.Inline) + if !value.IsValid() { + continue + } + } + if info.OmitEmpty && isZero(value) { + continue + } + e.marshal("", reflect.ValueOf(info.Key)) + e.flow = info.Flow + e.marshal("", value) + } + if sinfo.InlineMap >= 0 { + m := in.Field(sinfo.InlineMap) + if m.Len() > 0 { + e.flow = false + keys := keyList(m.MapKeys()) + sort.Sort(keys) + for _, k := range keys { + if _, found := sinfo.FieldsMap[k.String()]; found { + panic(fmt.Sprintf("cannot have key %q in inlined map: conflicts with struct field", k.String())) + } + e.marshal("", k) + e.flow = false + e.marshal("", m.MapIndex(k)) + } + } + } + }) +} + +func (e *encoder) mappingv(tag string, f func()) { + implicit := tag == "" + style := libyaml.BLOCK_MAPPING_STYLE + if e.flow { + e.flow = false + style = libyaml.FLOW_MAPPING_STYLE + } + e.event = libyaml.NewMappingStartEvent(nil, []byte(tag), implicit, style) + e.emit() + f() + e.event = 
libyaml.NewMappingEndEvent() + e.emit() +} + +func (e *encoder) slicev(tag string, in reflect.Value) { + implicit := tag == "" + style := libyaml.BLOCK_SEQUENCE_STYLE + if e.flow { + e.flow = false + style = libyaml.FLOW_SEQUENCE_STYLE + } + e.event = libyaml.NewSequenceStartEvent(nil, []byte(tag), implicit, style) + e.emit() + n := in.Len() + for i := 0; i < n; i++ { + e.marshal("", in.Index(i)) + } + e.event = libyaml.NewSequenceEndEvent() + e.emit() +} + +// isBase60 returns whether s is in base 60 notation as defined in YAML 1.1. +// +// The base 60 float notation in YAML 1.1 is a terrible idea and is unsupported +// in YAML 1.2 and by this package, but these should be marshaled quoted for +// the time being for compatibility with other parsers. +func isBase60Float(s string) (result bool) { + // Fast path. + if s == "" { + return false + } + c := s[0] + if !(c == '+' || c == '-' || c >= '0' && c <= '9') || strings.IndexByte(s, ':') < 0 { + return false + } + // Do the full match. + return base60float.MatchString(s) +} + +// From http://yaml.org/type/float.html, except the regular expression there +// is bogus. In practice parsers do not enforce the "\.[0-9_]*" suffix. +var base60float = regexp.MustCompile(`^[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+(?:\.[0-9_]*)?$`) + +// isOldBool returns whether s is bool notation as defined in YAML 1.1. +// +// We continue to force strings that YAML 1.1 would interpret as booleans to be +// rendered as quotes strings so that the marshaled output valid for YAML 1.1 +// parsing. +func isOldBool(s string) (result bool) { + switch s { + case "y", "Y", "yes", "Yes", "YES", "on", "On", "ON", + "n", "N", "no", "No", "NO", "off", "Off", "OFF": + return true + default: + return false + } +} + +// looksLikeMerge returns true if the given string is the merge indicator "<<". +// +// When encoding a scalar with this exact value, it must be quoted to prevent it +// from being interpreted as a merge indicator during decoding. 
+func looksLikeMerge(s string) (result bool) { + return s == "<<" +} + +func (e *encoder) stringv(tag string, in reflect.Value) { + var style libyaml.ScalarStyle + s := in.String() + canUsePlain := true + switch { + case !utf8.ValidString(s): + if tag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if tag != "" { + failf("cannot marshal invalid UTF-8 data as %s", shortTag(tag)) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. + tag = binaryTag + s = encodeBase64(s) + case tag == "": + // Check to see if it would resolve to a specific + // tag when encoded unquoted. If it doesn't, + // there's no need to quote it. + rtag, _ := resolve("", s) + canUsePlain = rtag == strTag && + !(isBase60Float(s) || + isOldBool(s) || + looksLikeMerge(s)) + } + // Note: it's possible for user code to emit invalid YAML + // if they explicitly specify a tag and a string containing + // text that's incompatible with that tag. 
+ switch { + case strings.Contains(s, "\n"): + if e.flow || !shouldUseLiteralStyle(s) { + style = libyaml.DOUBLE_QUOTED_SCALAR_STYLE + } else { + style = libyaml.LITERAL_SCALAR_STYLE + } + case canUsePlain: + style = libyaml.PLAIN_SCALAR_STYLE + default: + style = libyaml.DOUBLE_QUOTED_SCALAR_STYLE + } + e.emitScalar(s, "", tag, style, nil, nil, nil, nil) +} + +func (e *encoder) boolv(tag string, in reflect.Value) { + var s string + if in.Bool() { + s = "true" + } else { + s = "false" + } + e.emitScalar(s, "", tag, libyaml.PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) intv(tag string, in reflect.Value) { + s := strconv.FormatInt(in.Int(), 10) + e.emitScalar(s, "", tag, libyaml.PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) uintv(tag string, in reflect.Value) { + s := strconv.FormatUint(in.Uint(), 10) + e.emitScalar(s, "", tag, libyaml.PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) timev(tag string, in reflect.Value) { + t := in.Interface().(time.Time) + s := t.Format(time.RFC3339Nano) + e.emitScalar(s, "", tag, libyaml.PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) floatv(tag string, in reflect.Value) { + // Issue #352: When formatting, use the precision of the underlying value + precision := 64 + if in.Kind() == reflect.Float32 { + precision = 32 + } + + s := strconv.FormatFloat(in.Float(), 'g', -1, precision) + switch s { + case "+Inf": + s = ".inf" + case "-Inf": + s = "-.inf" + case "NaN": + s = ".nan" + } + e.emitScalar(s, "", tag, libyaml.PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) nilv() { + e.emitScalar("null", "", "", libyaml.PLAIN_SCALAR_STYLE, nil, nil, nil, nil) +} + +func (e *encoder) emitScalar(value, anchor, tag string, style libyaml.ScalarStyle, head, line, foot, tail []byte) { + // TODO Kill this function. Replace all initialize calls by their underlining Go literals. 
+ implicit := tag == "" + if !implicit { + tag = longTag(tag) + } + e.event = libyaml.NewScalarEvent([]byte(anchor), []byte(tag), []byte(value), implicit, implicit, style) + e.event.HeadComment = head + e.event.LineComment = line + e.event.FootComment = foot + e.event.TailComment = tail + e.emit() +} + +func (e *encoder) nodev(in reflect.Value) { + e.node(in.Interface().(*Node), "") +} + +func (e *encoder) node(node *Node, tail string) { + // Zero nodes behave as nil. + if node.Kind == 0 && node.IsZero() { + e.nilv() + return + } + + // If the tag was not explicitly requested, and dropping it won't change the + // implicit tag of the value, don't include it in the presentation. + tag := node.Tag + stag := shortTag(tag) + var forceQuoting bool + if tag != "" && node.Style&TaggedStyle == 0 { + if node.Kind == ScalarNode { + if stag == strTag && node.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0 { + tag = "" + } else { + rtag, _ := resolve("", node.Value) + if rtag == stag { + tag = "" + } else if stag == strTag { + tag = "" + forceQuoting = true + } + } + } else { + var rtag string + switch node.Kind { + case MappingNode: + rtag = mapTag + case SequenceNode: + rtag = seqTag + } + if rtag == stag { + tag = "" + } + } + } + + switch node.Kind { + case DocumentNode: + e.event = libyaml.NewDocumentStartEvent(nil, nil, true) + e.event.HeadComment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.event = libyaml.NewDocumentEndEvent(true) + e.event.FootComment = []byte(node.FootComment) + e.emit() + + case SequenceNode: + style := libyaml.BLOCK_SEQUENCE_STYLE + if node.Style&FlowStyle != 0 { + style = libyaml.FLOW_SEQUENCE_STYLE + } + e.event = libyaml.NewSequenceStartEvent([]byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.HeadComment = []byte(node.HeadComment) + e.emit() + for _, node := range node.Content { + e.node(node, "") + } + e.event = libyaml.NewSequenceEndEvent() 
+ e.event.LineComment = []byte(node.LineComment) + e.event.FootComment = []byte(node.FootComment) + e.emit() + + case MappingNode: + style := libyaml.BLOCK_MAPPING_STYLE + if node.Style&FlowStyle != 0 { + style = libyaml.FLOW_MAPPING_STYLE + } + e.event = libyaml.NewMappingStartEvent([]byte(node.Anchor), []byte(longTag(tag)), tag == "", style) + e.event.TailComment = []byte(tail) + e.event.HeadComment = []byte(node.HeadComment) + e.emit() + + // The tail logic below moves the foot comment of prior keys to the following key, + // since the value for each key may be a nested structure and the foot needs to be + // processed only the entirety of the value is streamed. The last tail is processed + // with the mapping end event. + var tail string + for i := 0; i+1 < len(node.Content); i += 2 { + k := node.Content[i] + foot := k.FootComment + if foot != "" { + kopy := *k + kopy.FootComment = "" + k = &kopy + } + e.node(k, tail) + tail = foot + + v := node.Content[i+1] + e.node(v, "") + } + + e.event = libyaml.NewMappingEndEvent() + e.event.TailComment = []byte(tail) + e.event.LineComment = []byte(node.LineComment) + e.event.FootComment = []byte(node.FootComment) + e.emit() + + case AliasNode: + e.event = libyaml.NewAliasEvent([]byte(node.Value)) + e.event.HeadComment = []byte(node.HeadComment) + e.event.LineComment = []byte(node.LineComment) + e.event.FootComment = []byte(node.FootComment) + e.emit() + + case ScalarNode: + value := node.Value + if !utf8.ValidString(value) { + if stag == binaryTag { + failf("explicitly tagged !!binary data must be base64-encoded") + } + if stag != "" { + failf("cannot marshal invalid UTF-8 data as %s", stag) + } + // It can't be encoded directly as YAML so use a binary tag + // and encode it as base64. 
+ tag = binaryTag + value = encodeBase64(value) + } + + style := libyaml.PLAIN_SCALAR_STYLE + switch { + case node.Style&DoubleQuotedStyle != 0: + style = libyaml.DOUBLE_QUOTED_SCALAR_STYLE + case node.Style&SingleQuotedStyle != 0: + style = libyaml.SINGLE_QUOTED_SCALAR_STYLE + case node.Style&LiteralStyle != 0: + style = libyaml.LITERAL_SCALAR_STYLE + case node.Style&FoldedStyle != 0: + style = libyaml.FOLDED_SCALAR_STYLE + case strings.Contains(value, "\n"): + style = libyaml.LITERAL_SCALAR_STYLE + case forceQuoting: + style = libyaml.DOUBLE_QUOTED_SCALAR_STYLE + } + + e.emitScalar(value, node.Anchor, tag, style, []byte(node.HeadComment), []byte(node.LineComment), []byte(node.FootComment), []byte(tail)) + default: + failf("cannot encode node with unknown kind %d", node.Kind) + } +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/api.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/api.go new file mode 100644 index 00000000..dbd94647 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/api.go @@ -0,0 +1,737 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package libyaml + +import ( + "io" +) + +func (parser *Parser) insertToken(pos int, token *Token) { + // fmt.Println("yaml_insert_token", "pos:", pos, "typ:", token.typ, "head:", parser.tokens_head, "len:", len(parser.tokens)) + + // Check if we can move the queue at the beginning of the buffer. + if parser.tokens_head > 0 && len(parser.tokens) == cap(parser.tokens) { + if parser.tokens_head != len(parser.tokens) { + copy(parser.tokens, parser.tokens[parser.tokens_head:]) + } + parser.tokens = parser.tokens[:len(parser.tokens)-parser.tokens_head] + parser.tokens_head = 0 + } + parser.tokens = append(parser.tokens, *token) + if pos < 0 { + return + } + copy(parser.tokens[parser.tokens_head+pos+1:], parser.tokens[parser.tokens_head+pos:]) + parser.tokens[parser.tokens_head+pos] = *token +} + +// NewParser creates a new parser object. +func NewParser() Parser { + return Parser{ + raw_buffer: make([]byte, 0, input_raw_buffer_size), + buffer: make([]byte, 0, input_buffer_size), + } +} + +// Delete a parser object. +func (parser *Parser) Delete() { + *parser = Parser{} +} + +// String read handler. +func yamlStringReadHandler(parser *Parser, buffer []byte) (n int, err error) { + if parser.input_pos == len(parser.input) { + return 0, io.EOF + } + n = copy(buffer, parser.input[parser.input_pos:]) + parser.input_pos += n + return n, nil +} + +// Reader read handler. +func yamlReaderReadHandler(parser *Parser, buffer []byte) (n int, err error) { + return parser.input_reader.Read(buffer) +} + +// SetInputString sets a string input. 
+func (parser *Parser) SetInputString(input []byte) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yamlStringReadHandler + parser.input = input + parser.input_pos = 0 +} + +// SetInputReader sets a file input. +func (parser *Parser) SetInputReader(r io.Reader) { + if parser.read_handler != nil { + panic("must set the input source only once") + } + parser.read_handler = yamlReaderReadHandler + parser.input_reader = r +} + +// SetEncoding sets the source encoding. +func (parser *Parser) SetEncoding(encoding Encoding) { + if parser.encoding != ANY_ENCODING { + panic("must set the encoding only once") + } + parser.encoding = encoding +} + +// NewEmitter creates a new emitter object. +func NewEmitter() Emitter { + return Emitter{ + buffer: make([]byte, output_buffer_size), + raw_buffer: make([]byte, 0, output_raw_buffer_size), + states: make([]EmitterState, 0, initial_stack_size), + events: make([]Event, 0, initial_queue_size), + best_width: -1, + } +} + +// Delete an emitter object. +func (emitter *Emitter) Delete() { + *emitter = Emitter{} +} + +// String write handler. +func yamlStringWriteHandler(emitter *Emitter, buffer []byte) error { + *emitter.output_buffer = append(*emitter.output_buffer, buffer...) + return nil +} + +// yamlWriterWriteHandler uses emitter.output_writer to write the +// emitted text. +func yamlWriterWriteHandler(emitter *Emitter, buffer []byte) error { + _, err := emitter.output_writer.Write(buffer) + return err +} + +// SetOutputString sets a string output. +func (emitter *Emitter) SetOutputString(output_buffer *[]byte) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yamlStringWriteHandler + emitter.output_buffer = output_buffer +} + +// SetOutputWriter sets a file output. 
+func (emitter *Emitter) SetOutputWriter(w io.Writer) { + if emitter.write_handler != nil { + panic("must set the output target only once") + } + emitter.write_handler = yamlWriterWriteHandler + emitter.output_writer = w +} + +// SetEncoding sets the output encoding. +func (emitter *Emitter) SetEncoding(encoding Encoding) { + if emitter.encoding != ANY_ENCODING { + panic("must set the output encoding only once") + } + emitter.encoding = encoding +} + +// SetCanonical sets the canonical output style. +func (emitter *Emitter) SetCanonical(canonical bool) { + emitter.canonical = canonical +} + +// SetIndent sets the indentation increment. +func (emitter *Emitter) SetIndent(indent int) { + if indent < 2 || indent > 9 { + indent = 2 + } + emitter.BestIndent = indent +} + +// SetWidth sets the preferred line width. +func (emitter *Emitter) SetWidth(width int) { + if width < 0 { + width = -1 + } + emitter.best_width = width +} + +// SetUnicode sets if unescaped non-ASCII characters are allowed. +func (emitter *Emitter) SetUnicode(unicode bool) { + emitter.unicode = unicode +} + +// SetLineBreak sets the preferred line break character. +func (emitter *Emitter) SetLineBreak(line_break LineBreak) { + emitter.line_break = line_break +} + +///* +// * Destroy a token object. +// */ +// +//YAML_DECLARE(void) +//yaml_token_delete(yaml_token_t *token) +//{ +// assert(token); // Non-NULL token object expected. 
+// +// switch (token.type) +// { +// case YAML_TAG_DIRECTIVE_TOKEN: +// yaml_free(token.data.tag_directive.handle); +// yaml_free(token.data.tag_directive.prefix); +// break; +// +// case YAML_ALIAS_TOKEN: +// yaml_free(token.data.alias.value); +// break; +// +// case YAML_ANCHOR_TOKEN: +// yaml_free(token.data.anchor.value); +// break; +// +// case YAML_TAG_TOKEN: +// yaml_free(token.data.tag.handle); +// yaml_free(token.data.tag.suffix); +// break; +// +// case YAML_SCALAR_TOKEN: +// yaml_free(token.data.scalar.value); +// break; +// +// default: +// break; +// } +// +// memset(token, 0, sizeof(yaml_token_t)); +//} +// +///* +// * Check if a string is a valid UTF-8 sequence. +// * +// * Check 'reader.c' for more details on UTF-8 encoding. +// */ +// +//static int +//yaml_check_utf8(yaml_char_t *start, size_t length) +//{ +// yaml_char_t *end = start+length; +// yaml_char_t *pointer = start; +// +// while (pointer < end) { +// unsigned char octet; +// unsigned int width; +// unsigned int value; +// size_t k; +// +// octet = pointer[0]; +// width = (octet & 0x80) == 0x00 ? 1 : +// (octet & 0xE0) == 0xC0 ? 2 : +// (octet & 0xF0) == 0xE0 ? 3 : +// (octet & 0xF8) == 0xF0 ? 4 : 0; +// value = (octet & 0x80) == 0x00 ? octet & 0x7F : +// (octet & 0xE0) == 0xC0 ? octet & 0x1F : +// (octet & 0xF0) == 0xE0 ? octet & 0x0F : +// (octet & 0xF8) == 0xF0 ? octet & 0x07 : 0; +// if (!width) return 0; +// if (pointer+width > end) return 0; +// for (k = 1; k < width; k ++) { +// octet = pointer[k]; +// if ((octet & 0xC0) != 0x80) return 0; +// value = (value << 6) + (octet & 0x3F); +// } +// if (!((width == 1) || +// (width == 2 && value >= 0x80) || +// (width == 3 && value >= 0x800) || +// (width == 4 && value >= 0x10000))) return 0; +// +// pointer += width; +// } +// +// return 1; +//} +// + +// NewStreamStartEvent creates a new STREAM-START event. 
+func NewStreamStartEvent(encoding Encoding) Event { + return Event{ + Type: STREAM_START_EVENT, + encoding: encoding, + } +} + +// NewStreamEndEvent creates a new STREAM-END event. +func NewStreamEndEvent() Event { + return Event{ + Type: STREAM_END_EVENT, + } +} + +// NewDocumentStartEvent creates a new DOCUMENT-START event. +func NewDocumentStartEvent(version_directive *VersionDirective, tag_directives []TagDirective, implicit bool) Event { + return Event{ + Type: DOCUMENT_START_EVENT, + version_directive: version_directive, + tag_directives: tag_directives, + Implicit: implicit, + } +} + +// NewDocumentEndEvent creates a new DOCUMENT-END event. +func NewDocumentEndEvent(implicit bool) Event { + return Event{ + Type: DOCUMENT_END_EVENT, + Implicit: implicit, + } +} + +// NewAliasEvent creates a new ALIAS event. +func NewAliasEvent(anchor []byte) Event { + return Event{ + Type: ALIAS_EVENT, + Anchor: anchor, + } +} + +// NewScalarEvent creates a new SCALAR event. +func NewScalarEvent(anchor, tag, value []byte, plain_implicit, quoted_implicit bool, style ScalarStyle) Event { + return Event{ + Type: SCALAR_EVENT, + Anchor: anchor, + Tag: tag, + Value: value, + Implicit: plain_implicit, + quoted_implicit: quoted_implicit, + Style: Style(style), + } +} + +// NewSequenceStartEvent creates a new SEQUENCE-START event. +func NewSequenceStartEvent(anchor, tag []byte, implicit bool, style SequenceStyle) Event { + return Event{ + Type: SEQUENCE_START_EVENT, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + Style: Style(style), + } +} + +// NewSequenceEndEvent creates a new SEQUENCE-END event. +func NewSequenceEndEvent() Event { + return Event{ + Type: SEQUENCE_END_EVENT, + } +} + +// NewMappingStartEvent creates a new MAPPING-START event. 
+func NewMappingStartEvent(anchor, tag []byte, implicit bool, style MappingStyle) Event { + return Event{ + Type: MAPPING_START_EVENT, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + Style: Style(style), + } +} + +// NewMappingEndEvent creates a new MAPPING-END event. +func NewMappingEndEvent() Event { + return Event{ + Type: MAPPING_END_EVENT, + } +} + +// Delete an event object. +func (e *Event) Delete() { + *e = Event{} +} + +///* +// * Create a document object. +// */ +// +//YAML_DECLARE(int) +//yaml_document_initialize(document *yaml_document_t, +// version_directive *yaml_version_directive_t, +// tag_directives_start *yaml_tag_directive_t, +// tag_directives_end *yaml_tag_directive_t, +// start_implicit int, end_implicit int) +//{ +// struct { +// error yaml_error_type_t +// } context +// struct { +// start *yaml_node_t +// end *yaml_node_t +// top *yaml_node_t +// } nodes = { NULL, NULL, NULL } +// version_directive_copy *yaml_version_directive_t = NULL +// struct { +// start *yaml_tag_directive_t +// end *yaml_tag_directive_t +// top *yaml_tag_directive_t +// } tag_directives_copy = { NULL, NULL, NULL } +// value yaml_tag_directive_t = { NULL, NULL } +// mark yaml_mark_t = { 0, 0, 0 } +// +// assert(document) // Non-NULL document object is expected. +// assert((tag_directives_start && tag_directives_end) || +// (tag_directives_start == tag_directives_end)) +// // Valid tag directives are expected. 
+// +// if (!STACK_INIT(&context, nodes, INITIAL_STACK_SIZE)) goto error +// +// if (version_directive) { +// version_directive_copy = yaml_malloc(sizeof(yaml_version_directive_t)) +// if (!version_directive_copy) goto error +// version_directive_copy.major = version_directive.major +// version_directive_copy.minor = version_directive.minor +// } +// +// if (tag_directives_start != tag_directives_end) { +// tag_directive *yaml_tag_directive_t +// if (!STACK_INIT(&context, tag_directives_copy, INITIAL_STACK_SIZE)) +// goto error +// for (tag_directive = tag_directives_start +// tag_directive != tag_directives_end; tag_directive ++) { +// assert(tag_directive.handle) +// assert(tag_directive.prefix) +// if (!yaml_check_utf8(tag_directive.handle, +// strlen((char *)tag_directive.handle))) +// goto error +// if (!yaml_check_utf8(tag_directive.prefix, +// strlen((char *)tag_directive.prefix))) +// goto error +// value.handle = yaml_strdup(tag_directive.handle) +// value.prefix = yaml_strdup(tag_directive.prefix) +// if (!value.handle || !value.prefix) goto error +// if (!PUSH(&context, tag_directives_copy, value)) +// goto error +// value.handle = NULL +// value.prefix = NULL +// } +// } +// +// DOCUMENT_INIT(*document, nodes.start, nodes.end, version_directive_copy, +// tag_directives_copy.start, tag_directives_copy.top, +// start_implicit, end_implicit, mark, mark) +// +// return 1 +// +//error: +// STACK_DEL(&context, nodes) +// yaml_free(version_directive_copy) +// while (!STACK_EMPTY(&context, tag_directives_copy)) { +// value yaml_tag_directive_t = POP(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// } +// STACK_DEL(&context, tag_directives_copy) +// yaml_free(value.handle) +// yaml_free(value.prefix) +// +// return 0 +//} +// +///* +// * Destroy a document object. 
+// */ +// +//YAML_DECLARE(void) +//yaml_document_delete(document *yaml_document_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// tag_directive *yaml_tag_directive_t +// +// context.error = YAML_NO_ERROR // Eliminate a compiler warning. +// +// assert(document) // Non-NULL document object is expected. +// +// while (!STACK_EMPTY(&context, document.nodes)) { +// node yaml_node_t = POP(&context, document.nodes) +// yaml_free(node.tag) +// switch (node.type) { +// case YAML_SCALAR_NODE: +// yaml_free(node.data.scalar.value) +// break +// case YAML_SEQUENCE_NODE: +// STACK_DEL(&context, node.data.sequence.items) +// break +// case YAML_MAPPING_NODE: +// STACK_DEL(&context, node.data.mapping.pairs) +// break +// default: +// assert(0) // Should not happen. +// } +// } +// STACK_DEL(&context, document.nodes) +// +// yaml_free(document.version_directive) +// for (tag_directive = document.tag_directives.start +// tag_directive != document.tag_directives.end +// tag_directive++) { +// yaml_free(tag_directive.handle) +// yaml_free(tag_directive.prefix) +// } +// yaml_free(document.tag_directives.start) +// +// memset(document, 0, sizeof(yaml_document_t)) +//} +// +///** +// * Get a document node. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_node(document *yaml_document_t, index int) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (index > 0 && document.nodes.start + index <= document.nodes.top) { +// return document.nodes.start + index - 1 +// } +// return NULL +//} +// +///** +// * Get the root object. +// */ +// +//YAML_DECLARE(yaml_node_t *) +//yaml_document_get_root_node(document *yaml_document_t) +//{ +// assert(document) // Non-NULL document object is expected. +// +// if (document.nodes.top != document.nodes.start) { +// return document.nodes.start +// } +// return NULL +//} +// +///* +// * Add a scalar node to a document. 
+// */ +// +//YAML_DECLARE(int) +//yaml_document_add_scalar(document *yaml_document_t, +// tag *yaml_char_t, value *yaml_char_t, length int, +// style yaml_scalar_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// value_copy *yaml_char_t = NULL +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// assert(value) // Non-NULL value is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SCALAR_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (length < 0) { +// length = strlen((char *)value) +// } +// +// if (!yaml_check_utf8(value, length)) goto error +// value_copy = yaml_malloc(length+1) +// if (!value_copy) goto error +// memcpy(value_copy, value, length) +// value_copy[length] = '\0' +// +// SCALAR_NODE_INIT(node, tag_copy, value_copy, length, style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// yaml_free(tag_copy) +// yaml_free(value_copy) +// +// return 0 +//} +// +///* +// * Add a sequence node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_sequence(document *yaml_document_t, +// tag *yaml_char_t, style yaml_sequence_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_item_t +// end *yaml_node_item_t +// top *yaml_node_item_t +// } items = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. 
+// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_SEQUENCE_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, items, INITIAL_STACK_SIZE)) goto error +// +// SEQUENCE_NODE_INIT(node, tag_copy, items.start, items.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, items) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Add a mapping node to a document. +// */ +// +//YAML_DECLARE(int) +//yaml_document_add_mapping(document *yaml_document_t, +// tag *yaml_char_t, style yaml_mapping_style_t) +//{ +// struct { +// error yaml_error_type_t +// } context +// mark yaml_mark_t = { 0, 0, 0 } +// tag_copy *yaml_char_t = NULL +// struct { +// start *yaml_node_pair_t +// end *yaml_node_pair_t +// top *yaml_node_pair_t +// } pairs = { NULL, NULL, NULL } +// node yaml_node_t +// +// assert(document) // Non-NULL document object is expected. +// +// if (!tag) { +// tag = (yaml_char_t *)YAML_DEFAULT_MAPPING_TAG +// } +// +// if (!yaml_check_utf8(tag, strlen((char *)tag))) goto error +// tag_copy = yaml_strdup(tag) +// if (!tag_copy) goto error +// +// if (!STACK_INIT(&context, pairs, INITIAL_STACK_SIZE)) goto error +// +// MAPPING_NODE_INIT(node, tag_copy, pairs.start, pairs.end, +// style, mark, mark) +// if (!PUSH(&context, document.nodes, node)) goto error +// +// return document.nodes.top - document.nodes.start +// +//error: +// STACK_DEL(&context, pairs) +// yaml_free(tag_copy) +// +// return 0 +//} +// +///* +// * Append an item to a sequence node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_sequence_item(document *yaml_document_t, +// sequence int, item int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// assert(document) // Non-NULL document is required. 
+// assert(sequence > 0 +// && document.nodes.start + sequence <= document.nodes.top) +// // Valid sequence id is required. +// assert(document.nodes.start[sequence-1].type == YAML_SEQUENCE_NODE) +// // A sequence node is required. +// assert(item > 0 && document.nodes.start + item <= document.nodes.top) +// // Valid item id is required. +// +// if (!PUSH(&context, +// document.nodes.start[sequence-1].data.sequence.items, item)) +// return 0 +// +// return 1 +//} +// +///* +// * Append a pair of a key and a value to a mapping node. +// */ +// +//YAML_DECLARE(int) +//yaml_document_append_mapping_pair(document *yaml_document_t, +// mapping int, key int, value int) +//{ +// struct { +// error yaml_error_type_t +// } context +// +// pair yaml_node_pair_t +// +// assert(document) // Non-NULL document is required. +// assert(mapping > 0 +// && document.nodes.start + mapping <= document.nodes.top) +// // Valid mapping id is required. +// assert(document.nodes.start[mapping-1].type == YAML_MAPPING_NODE) +// // A mapping node is required. +// assert(key > 0 && document.nodes.start + key <= document.nodes.top) +// // Valid key id is required. +// assert(value > 0 && document.nodes.start + value <= document.nodes.top) +// // Valid value id is required. 
+// +// pair.key = key +// pair.value = value +// +// if (!PUSH(&context, +// document.nodes.start[mapping-1].data.mapping.pairs, pair)) +// return 0 +// +// return 1 +//} +// +// diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/doc.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/doc.go new file mode 100644 index 00000000..6800825c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/doc.go @@ -0,0 +1,5 @@ +// Package libyaml contains internal helpers for working with YAML +// +// It's a reworked version of the original libyaml package from go-yaml v2/v3, +// adapted to work with Go specifications +package libyaml diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/emitter.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/emitter.go new file mode 100644 index 00000000..a2a9cf81 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/emitter.go @@ -0,0 +1,2064 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package libyaml + +import ( + "bytes" + "errors" + "fmt" +) + +// Flush the buffer if needed. +func (emitter *Emitter) flushIfNeeded() error { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + return emitter.flush() + } + return nil +} + +// Put a character to the output buffer. +func (emitter *Emitter) put(value byte) error { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + if err := emitter.flush(); err != nil { + return err + } + } + emitter.buffer[emitter.buffer_pos] = value + emitter.buffer_pos++ + emitter.column++ + return nil +} + +// Put a line break to the output buffer. +func (emitter *Emitter) putLineBreak() error { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + if err := emitter.flush(); err != nil { + return err + } + } + switch emitter.line_break { + case CR_BREAK: + emitter.buffer[emitter.buffer_pos] = '\r' + emitter.buffer_pos += 1 + case LN_BREAK: + emitter.buffer[emitter.buffer_pos] = '\n' + emitter.buffer_pos += 1 + case CRLN_BREAK: + emitter.buffer[emitter.buffer_pos+0] = '\r' + emitter.buffer[emitter.buffer_pos+1] = '\n' + emitter.buffer_pos += 2 + default: + panic("unknown line break setting") + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and below and drop from everywhere else (see commented lines). + emitter.indention = true + return nil +} + +// Copy a character from a string into buffer. 
+func (emitter *Emitter) write(s []byte, i *int) error { + if emitter.buffer_pos+5 >= len(emitter.buffer) { + if err := emitter.flush(); err != nil { + return err + } + } + p := emitter.buffer_pos + w := width(s[*i]) + switch w { + case 4: + emitter.buffer[p+3] = s[*i+3] + fallthrough + case 3: + emitter.buffer[p+2] = s[*i+2] + fallthrough + case 2: + emitter.buffer[p+1] = s[*i+1] + fallthrough + case 1: + emitter.buffer[p+0] = s[*i+0] + default: + panic("unknown character width") + } + emitter.column++ + emitter.buffer_pos += w + *i += w + return nil +} + +// Write a whole string into buffer. +func (emitter *Emitter) writeAll(s []byte) error { + for i := 0; i < len(s); { + if err := emitter.write(s, &i); err != nil { + return err + } + } + return nil +} + +// Copy a line break character from a string into buffer. +func (emitter *Emitter) writeLineBreak(s []byte, i *int) error { + if s[*i] == '\n' { + if err := emitter.putLineBreak(); err != nil { + return err + } + *i++ + } else { + if err := emitter.write(s, i); err != nil { + return err + } + if emitter.column == 0 { + emitter.space_above = true + } + emitter.column = 0 + emitter.line++ + // [Go] Do this here and above and drop from everywhere else (see commented lines). + emitter.indention = true + } + return nil +} + +// Emit an event. +func (emitter *Emitter) Emit(event *Event) error { + emitter.events = append(emitter.events, *event) + for !emitter.needMoreEvents() { + event := &emitter.events[emitter.events_head] + if err := emitter.analyzeEvent(event); err != nil { + return err + } + if err := emitter.stateMachine(event); err != nil { + return err + } + event.Delete() + emitter.events_head++ + } + return nil +} + +// Check if we need to accumulate more events before emitting. 
+// +// We accumulate extra +// - 1 event for DOCUMENT-START +// - 2 events for SEQUENCE-START +// - 3 events for MAPPING-START +func (emitter *Emitter) needMoreEvents() bool { + if emitter.events_head == len(emitter.events) { + return true + } + var accumulate int + switch emitter.events[emitter.events_head].Type { + case DOCUMENT_START_EVENT: + accumulate = 1 + case SEQUENCE_START_EVENT: + accumulate = 2 + case MAPPING_START_EVENT: + accumulate = 3 + default: + return false + } + if len(emitter.events)-emitter.events_head > accumulate { + return false + } + var level int + for i := emitter.events_head; i < len(emitter.events); i++ { + switch emitter.events[i].Type { + case STREAM_START_EVENT, DOCUMENT_START_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT: + level++ + case STREAM_END_EVENT, DOCUMENT_END_EVENT, SEQUENCE_END_EVENT, MAPPING_END_EVENT: + level-- + } + if level == 0 { + return false + } + } + return true +} + +// Append a directive to the directives stack. +func (emitter *Emitter) appendTagDirective(value *TagDirective, allow_duplicates bool) error { + for i := 0; i < len(emitter.tag_directives); i++ { + if bytes.Equal(value.handle, emitter.tag_directives[i].handle) { + if allow_duplicates { + return nil + } + return errors.New("duplicate %TAG directive") + } + } + + // [Go] Do we actually need to copy this given garbage collection + // and the lack of deallocating destructors? + tag_copy := TagDirective{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(tag_copy.handle, value.handle) + copy(tag_copy.prefix, value.prefix) + emitter.tag_directives = append(emitter.tag_directives, tag_copy) + return nil +} + +// Increase the indentation level. 
+func (emitter *Emitter) increaseIndentCompact(flow, indentless bool, compact_seq bool) error { + emitter.indents = append(emitter.indents, emitter.indent) + if emitter.indent < 0 { + if flow { + emitter.indent = emitter.BestIndent + } else { + emitter.indent = 0 + } + } else if !indentless { + // [Go] This was changed so that indentations are more regular. + if emitter.states[len(emitter.states)-1] == EMIT_BLOCK_SEQUENCE_ITEM_STATE { + // The first indent inside a sequence will just skip the "- " indicator. + emitter.indent += 2 + } else { + // Everything else aligns to the chosen indentation. + emitter.indent = emitter.BestIndent * ((emitter.indent + emitter.BestIndent) / emitter.BestIndent) + if compact_seq { + // The value compact_seq passed in is almost always set to `false` when this function is called, + // except when we are dealing with sequence nodes. So this gets triggered to subtract 2 only when we + // are increasing the indent to account for sequence nodes, which will be correct because we need to + // subtract 2 to account for the - at the beginning of the sequence node. + emitter.indent = emitter.indent - 2 + } + } + } + return nil +} + +// State dispatcher. 
+func (emitter *Emitter) stateMachine(event *Event) error { + switch emitter.state { + default: + case EMIT_STREAM_START_STATE: + return emitter.emitStreamStart(event) + + case EMIT_FIRST_DOCUMENT_START_STATE: + return emitter.emitDocumentStart(event, true) + + case EMIT_DOCUMENT_START_STATE: + return emitter.emitDocumentStart(event, false) + + case EMIT_DOCUMENT_CONTENT_STATE: + return emitter.emitDocumentContent(event) + + case EMIT_DOCUMENT_END_STATE: + return emitter.emitDocumentEnd(event) + + case EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE: + return emitter.emitFlowSequenceItem(event, true, false) + + case EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE: + return emitter.emitFlowSequenceItem(event, false, true) + + case EMIT_FLOW_SEQUENCE_ITEM_STATE: + return emitter.emitFlowSequenceItem(event, false, false) + + case EMIT_FLOW_MAPPING_FIRST_KEY_STATE: + return emitter.emitFlowMappingKey(event, true, false) + + case EMIT_FLOW_MAPPING_TRAIL_KEY_STATE: + return emitter.emitFlowMappingKey(event, false, true) + + case EMIT_FLOW_MAPPING_KEY_STATE: + return emitter.emitFlowMappingKey(event, false, false) + + case EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE: + return emitter.emitFlowMappingValue(event, true) + + case EMIT_FLOW_MAPPING_VALUE_STATE: + return emitter.emitFlowMappingValue(event, false) + + case EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE: + return emitter.emitBlockSequenceItem(event, true) + + case EMIT_BLOCK_SEQUENCE_ITEM_STATE: + return emitter.emitBlockSequenceItem(event, false) + + case EMIT_BLOCK_MAPPING_FIRST_KEY_STATE: + return emitter.emitBlockMappingKey(event, true) + + case EMIT_BLOCK_MAPPING_KEY_STATE: + return emitter.emitBlockMappingKey(event, false) + + case EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE: + return emitter.emitBlockMappingValue(event, true) + + case EMIT_BLOCK_MAPPING_VALUE_STATE: + return emitter.emitBlockMappingValue(event, false) + + case EMIT_END_STATE: + return errors.New("expected nothing after STREAM-END") + } + panic("invalid emitter state") +} + +// Expect 
STREAM-START. +func (emitter *Emitter) emitStreamStart(event *Event) error { + if event.Type != STREAM_START_EVENT { + return errors.New("expected STREAM-START") + } + if emitter.encoding == ANY_ENCODING { + emitter.encoding = event.encoding + if emitter.encoding == ANY_ENCODING { + emitter.encoding = UTF8_ENCODING + } + } + if emitter.BestIndent < 2 || emitter.BestIndent > 9 { + emitter.BestIndent = 2 + } + if emitter.best_width >= 0 && emitter.best_width <= emitter.BestIndent*2 { + emitter.best_width = 80 + } + if emitter.best_width < 0 { + emitter.best_width = 1<<31 - 1 + } + if emitter.line_break == ANY_BREAK { + emitter.line_break = LN_BREAK + } + + emitter.indent = -1 + emitter.line = 0 + emitter.column = 0 + emitter.whitespace = true + emitter.indention = true + emitter.space_above = true + emitter.foot_indent = -1 + + if emitter.encoding != UTF8_ENCODING { + if err := emitter.writeBom(); err != nil { + return err + } + } + emitter.state = EMIT_FIRST_DOCUMENT_START_STATE + return nil +} + +// Expect DOCUMENT-START or STREAM-END. 
+func (emitter *Emitter) emitDocumentStart(event *Event, first bool) error { + if event.Type == DOCUMENT_START_EVENT { + + if event.version_directive != nil { + if err := emitter.analyzeVersionDirective(event.version_directive); err != nil { + return err + } + } + + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if err := emitter.analyzeTagDirective(tag_directive); err != nil { + return err + } + if err := emitter.appendTagDirective(tag_directive, false); err != nil { + return err + } + } + + for i := 0; i < len(default_tag_directives); i++ { + tag_directive := &default_tag_directives[i] + if err := emitter.appendTagDirective(tag_directive, true); err != nil { + return err + } + } + + implicit := event.Implicit + if !first || emitter.canonical { + implicit = false + } + + if emitter.OpenEnded && (event.version_directive != nil || len(event.tag_directives) > 0) { + if err := emitter.writeIndicator([]byte("..."), true, false, false); err != nil { + return err + } + if err := emitter.writeIndent(); err != nil { + return err + } + } + + if event.version_directive != nil { + implicit = false + if err := emitter.writeIndicator([]byte("%YAML"), true, false, false); err != nil { + return err + } + if err := emitter.writeIndicator([]byte("1.1"), true, false, false); err != nil { + return err + } + if err := emitter.writeIndent(); err != nil { + return err + } + } + + if len(event.tag_directives) > 0 { + implicit = false + for i := 0; i < len(event.tag_directives); i++ { + tag_directive := &event.tag_directives[i] + if err := emitter.writeIndicator([]byte("%TAG"), true, false, false); err != nil { + return err + } + if err := emitter.writeTagHandle(tag_directive.handle); err != nil { + return err + } + if err := emitter.writeTagContent(tag_directive.prefix, true); err != nil { + return err + } + if err := emitter.writeIndent(); err != nil { + return err + } + } + } + + if emitter.checkEmptyDocument() { + implicit = false + } + 
if !implicit { + if err := emitter.writeIndent(); err != nil { + return err + } + if err := emitter.writeIndicator([]byte("---"), true, false, false); err != nil { + return err + } + if emitter.canonical || true { + if err := emitter.writeIndent(); err != nil { + return err + } + } + } + + if len(emitter.HeadComment) > 0 { + if err := emitter.processHeadComment(); err != nil { + return err + } + if err := emitter.putLineBreak(); err != nil { + return err + } + } + + emitter.state = EMIT_DOCUMENT_CONTENT_STATE + return nil + } + + if event.Type == STREAM_END_EVENT { + if emitter.OpenEnded { + if err := emitter.writeIndicator([]byte("..."), true, false, false); err != nil { + return err + } + if err := emitter.writeIndent(); err != nil { + return err + } + } + if err := emitter.flush(); err != nil { + return err + } + emitter.state = EMIT_END_STATE + return nil + } + + return errors.New("expected DOCUMENT-START or STREAM-END") +} + +// emitter preserves the original signature and delegates to +// increaseIndentCompact without compact-sequence indentation +func (emitter *Emitter) increaseIndent(flow, indentless bool) error { + return emitter.increaseIndentCompact(flow, indentless, false) +} + +// processLineComment preserves the original signature and delegates to +// processLineCommentLinebreak passing false for linebreak +func (emitter *Emitter) processLineComment() error { + return emitter.processLineCommentLinebreak(false) +} + +// Expect the root node. +func (emitter *Emitter) emitDocumentContent(event *Event) error { + emitter.states = append(emitter.states, EMIT_DOCUMENT_END_STATE) + + if err := emitter.processHeadComment(); err != nil { + return err + } + if err := emitter.emitNode(event, true, false, false, false); err != nil { + return err + } + if err := emitter.processLineComment(); err != nil { + return err + } + if err := emitter.processFootComment(); err != nil { + return err + } + return nil +} + +// Expect DOCUMENT-END. 
// emitDocumentEnd expects DOCUMENT-END: flush foot comments, write an
// explicit "..." when the end is not implicit, and reset for the next
// document.
func (emitter *Emitter) emitDocumentEnd(event *Event) error {
	if event.Type != DOCUMENT_END_EVENT {
		return errors.New("expected DOCUMENT-END")
	}
	// [Go] Force document foot separation.
	emitter.foot_indent = 0
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	emitter.foot_indent = -1
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if !event.Implicit {
		// [Go] Allocate the slice elsewhere.
		if err := emitter.writeIndicator([]byte("..."), true, false, false); err != nil {
			return err
		}
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}
	if err := emitter.flush(); err != nil {
		return err
	}
	// Next document starts fresh, with its tag directives cleared.
	emitter.state = EMIT_DOCUMENT_START_STATE
	emitter.tag_directives = emitter.tag_directives[:0]
	return nil
}

// Expect a flow item node.
func (emitter *Emitter) emitFlowSequenceItem(event *Event, first, trail bool) error {
	if first {
		// Open the flow sequence: "[", bump indent, enter a flow level.
		if err := emitter.writeIndicator([]byte{'['}, true, true, false); err != nil {
			return err
		}
		if err := emitter.increaseIndent(true, false); err != nil {
			return err
		}
		emitter.flow_level++
	}

	if event.Type == SEQUENCE_END_EVENT {
		if emitter.canonical && !first && !trail {
			if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
				return err
			}
		}
		// Close the flow sequence: pop the indent stack, write "]", and
		// return to the enclosing state.
		emitter.flow_level--
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		if emitter.column == 0 || emitter.canonical && !first {
			if err := emitter.writeIndent(); err != nil {
				return err
			}
		}
		if err := emitter.writeIndicator([]byte{']'}, false, false, false); err != nil {
			return err
		}
		if err := emitter.processLineComment(); err != nil {
			return err
		}
		if err := emitter.processFootComment(); err != nil {
			return err
		}
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]

		return nil
	}

	if !first && !trail {
		if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
			return err
		}
	}

	if err := emitter.processHeadComment(); err != nil {
		return err
	}
	if emitter.column == 0 {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}

	// Wrap the line in canonical mode or when past the preferred width.
	if emitter.canonical || emitter.column > emitter.best_width {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}
	// Items carrying comments route through the TRAIL_ITEM state, and the
	// "," is written right after the node, ahead of its comments.
	if len(emitter.LineComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0 {
		emitter.states = append(emitter.states, EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE)
	} else {
		emitter.states = append(emitter.states, EMIT_FLOW_SEQUENCE_ITEM_STATE)
	}
	if err := emitter.emitNode(event, false, true, false, false); err != nil {
		return err
	}
	if len(emitter.LineComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0 {
		if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
			return err
		}
	}
	if err := emitter.processLineComment(); err != nil {
		return err
	}
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	return nil
}

// Expect a flow key node.
// emitFlowMappingKey expects a flow key node (or MAPPING-END).
func (emitter *Emitter) emitFlowMappingKey(event *Event, first, trail bool) error {
	if first {
		// Open the flow mapping: "{", bump indent, enter a flow level.
		if err := emitter.writeIndicator([]byte{'{'}, true, true, false); err != nil {
			return err
		}
		if err := emitter.increaseIndent(true, false); err != nil {
			return err
		}
		emitter.flow_level++
	}

	if event.Type == MAPPING_END_EVENT {
		if (emitter.canonical || len(emitter.HeadComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0) && !first && !trail {
			if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
				return err
			}
		}
		if err := emitter.processHeadComment(); err != nil {
			return err
		}
		// Close the flow mapping: pop the indent stack, write "}", and
		// return to the enclosing state.
		emitter.flow_level--
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		if emitter.canonical && !first {
			if err := emitter.writeIndent(); err != nil {
				return err
			}
		}
		if err := emitter.writeIndicator([]byte{'}'}, false, false, false); err != nil {
			return err
		}
		if err := emitter.processLineComment(); err != nil {
			return err
		}
		if err := emitter.processFootComment(); err != nil {
			return err
		}
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return nil
	}

	if !first && !trail {
		if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
			return err
		}
	}

	if err := emitter.processHeadComment(); err != nil {
		return err
	}

	if emitter.column == 0 {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}

	// Wrap the line in canonical mode or when past the preferred width.
	if emitter.canonical || emitter.column > emitter.best_width {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
	}

	// Short, single-line keys go out directly; otherwise use the explicit
	// "?" key form.
	if !emitter.canonical && emitter.checkSimpleKey() {
		emitter.states = append(emitter.states, EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE)
		return emitter.emitNode(event, false, false, true, true)
	}
	if err := emitter.writeIndicator([]byte{'?'}, true, false, false); err != nil {
		return err
	}
	emitter.states = append(emitter.states, EMIT_FLOW_MAPPING_VALUE_STATE)
	return emitter.emitNode(event, false, false, true, false)
}

// Expect a flow value node.
func (emitter *Emitter) emitFlowMappingValue(event *Event, simple bool) error {
	if simple {
		if err := emitter.writeIndicator([]byte{':'}, false, false, false); err != nil {
			return err
		}
	} else {
		if emitter.canonical || emitter.column > emitter.best_width {
			if err := emitter.writeIndent(); err != nil {
				return err
			}
		}
		if err := emitter.writeIndicator([]byte{':'}, true, false, false); err != nil {
			return err
		}
	}
	// Values carrying comments route through the TRAIL_KEY state, and the
	// "," is written right after the node, ahead of its comments.
	if len(emitter.LineComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0 {
		emitter.states = append(emitter.states, EMIT_FLOW_MAPPING_TRAIL_KEY_STATE)
	} else {
		emitter.states = append(emitter.states, EMIT_FLOW_MAPPING_KEY_STATE)
	}
	if err := emitter.emitNode(event, false, false, true, false); err != nil {
		return err
	}
	if len(emitter.LineComment)+len(emitter.FootComment)+len(emitter.TailComment) > 0 {
		if err := emitter.writeIndicator([]byte{','}, false, false, false); err != nil {
			return err
		}
	}
	if err := emitter.processLineComment(); err != nil {
		return err
	}
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	return nil
}

// Expect a block item node.
func (emitter *Emitter) emitBlockSequenceItem(event *Event, first bool) error {
	if first {
		// emitter.mapping_context tells us if we are currently in a mapping context.
		// emitter.column tells us which column we are in the yaml output. 0 is the first char of the column.
		// emitter.indention tells us if the last character was an indentation character.
		// emitter.CompactSequenceIndent tells us if '- ' is considered part of the indentation for sequence elements.
		// So, `seq` means that we are in a mapping context, and we are either at the first char of the column or
		// the last character was not an indentation character, and we consider '- ' part of the indentation
		// for sequence elements.
		seq := emitter.mapping_context && (emitter.column == 0 || !emitter.indention) &&
			emitter.CompactSequenceIndent
		if err := emitter.increaseIndentCompact(false, false, seq); err != nil {
			return err
		}
	}
	if event.Type == SEQUENCE_END_EVENT {
		// Pop the indent stack and return to the enclosing state.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return nil
	}
	if err := emitter.processHeadComment(); err != nil {
		return err
	}
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if err := emitter.writeIndicator([]byte{'-'}, true, false, true); err != nil {
		return err
	}
	emitter.states = append(emitter.states, EMIT_BLOCK_SEQUENCE_ITEM_STATE)
	if err := emitter.emitNode(event, false, true, false, false); err != nil {
		return err
	}
	if err := emitter.processLineComment(); err != nil {
		return err
	}
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	return nil
}

// Expect a block key node.
// emitBlockMappingKey expects a block key node (or MAPPING-END).
func (emitter *Emitter) emitBlockMappingKey(event *Event, first bool) error {
	if first {
		if err := emitter.increaseIndent(false, false); err != nil {
			return err
		}
	}
	if err := emitter.processHeadComment(); err != nil {
		return err
	}
	if event.Type == MAPPING_END_EVENT {
		// Pop the indent stack and return to the enclosing state.
		emitter.indent = emitter.indents[len(emitter.indents)-1]
		emitter.indents = emitter.indents[:len(emitter.indents)-1]
		emitter.state = emitter.states[len(emitter.states)-1]
		emitter.states = emitter.states[:len(emitter.states)-1]
		return nil
	}
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if len(emitter.LineComment) > 0 {
		// [Go] A line comment was provided for the key. That's unusual as the
		// scanner associates line comments with the value. Either way,
		// save the line comment and render it appropriately later.
		emitter.key_line_comment = emitter.LineComment
		emitter.LineComment = nil
	}
	if emitter.checkSimpleKey() {
		emitter.states = append(emitter.states, EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE)
		if err := emitter.emitNode(event, false, false, true, true); err != nil {
			return err
		}

		if event.Type == ALIAS_EVENT {
			// make sure there's a space after the alias
			return emitter.put(' ')
		}

		return nil
	}
	// Key is too long or complex for the simple form: use the explicit
	// "?" key.
	if err := emitter.writeIndicator([]byte{'?'}, true, false, true); err != nil {
		return err
	}
	emitter.states = append(emitter.states, EMIT_BLOCK_MAPPING_VALUE_STATE)
	return emitter.emitNode(event, false, false, true, false)
}

// Expect a block value node.
// emitBlockMappingValue expects a block value node; simple selects the
// inline ":" form versus the indented explicit ":" form.
func (emitter *Emitter) emitBlockMappingValue(event *Event, simple bool) error {
	if simple {
		if err := emitter.writeIndicator([]byte{':'}, false, false, false); err != nil {
			return err
		}
	} else {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
		if err := emitter.writeIndicator([]byte{':'}, true, false, true); err != nil {
			return err
		}
	}
	if len(emitter.key_line_comment) > 0 {
		// [Go] Line comments are generally associated with the value, but when there's
		// no value on the same line as a mapping key they end up attached to the
		// key itself.
		if event.Type == SCALAR_EVENT {
			if len(emitter.LineComment) == 0 {
				// A scalar is coming and it has no line comments by itself yet,
				// so just let it handle the line comment as usual. If it has a
				// line comment, we can't have both so the one from the key is lost.
				emitter.LineComment = emitter.key_line_comment
				emitter.key_line_comment = nil
			}
		} else if event.SequenceStyle() != FLOW_SEQUENCE_STYLE && (event.Type == MAPPING_START_EVENT || event.Type == SEQUENCE_START_EVENT) {
			// An indented block follows, so write the comment right now.
			emitter.LineComment, emitter.key_line_comment = emitter.key_line_comment, emitter.LineComment
			if err := emitter.processLineComment(); err != nil {
				return err
			}
			emitter.LineComment, emitter.key_line_comment = emitter.key_line_comment, emitter.LineComment
		}
	}
	emitter.states = append(emitter.states, EMIT_BLOCK_MAPPING_KEY_STATE)
	if err := emitter.emitNode(event, false, false, true, false); err != nil {
		return err
	}
	if err := emitter.processLineComment(); err != nil {
		return err
	}
	if err := emitter.processFootComment(); err != nil {
		return err
	}
	return nil
}

// silentNilEvent reports whether the event is an implicit empty scalar,
// which renders as nothing in non-canonical output.
func (emitter *Emitter) silentNilEvent(event *Event) bool {
	return event.Type == SCALAR_EVENT && event.Implicit && !emitter.canonical && len(emitter.scalar_data.value) == 0
}

// Expect a node.
// emitNode records the surrounding context flags on the emitter, then
// dispatches on the event type to the matching emit function.
func (emitter *Emitter) emitNode(event *Event,
	root bool, sequence bool, mapping bool, simple_key bool,
) error {
	emitter.root_context = root
	emitter.sequence_context = sequence
	emitter.mapping_context = mapping
	emitter.simple_key_context = simple_key

	switch event.Type {
	case ALIAS_EVENT:
		return emitter.emitAlias(event)
	case SCALAR_EVENT:
		return emitter.emitScalar(event)
	case SEQUENCE_START_EVENT:
		return emitter.emitSequenceStart(event)
	case MAPPING_START_EVENT:
		return emitter.emitMappingStart(event)
	default:
		return fmt.Errorf("expected SCALAR, SEQUENCE-START, MAPPING-START, or ALIAS, but got %v", event.Type)
	}
}

// Expect ALIAS.
func (emitter *Emitter) emitAlias(event *Event) error {
	if err := emitter.processAnchor(); err != nil {
		return err
	}
	// Aliases carry no content: pop straight back to the previous state.
	emitter.state = emitter.states[len(emitter.states)-1]
	emitter.states = emitter.states[:len(emitter.states)-1]
	return nil
}

// Expect SCALAR.
func (emitter *Emitter) emitScalar(event *Event) error {
	if err := emitter.selectScalarStyle(event); err != nil {
		return err
	}
	if err := emitter.processAnchor(); err != nil {
		return err
	}
	if err := emitter.processTag(); err != nil {
		return err
	}
	if err := emitter.increaseIndent(true, false); err != nil {
		return err
	}
	if err := emitter.processScalar(); err != nil {
		return err
	}
	// Undo the scalar's temporary indent and pop back to the previous state.
	emitter.indent = emitter.indents[len(emitter.indents)-1]
	emitter.indents = emitter.indents[:len(emitter.indents)-1]
	emitter.state = emitter.states[len(emitter.states)-1]
	emitter.states = emitter.states[:len(emitter.states)-1]
	return nil
}

// Expect SEQUENCE-START.
// emitSequenceStart chooses flow or block style for a starting sequence and
// sets the corresponding first-item state.
func (emitter *Emitter) emitSequenceStart(event *Event) error {
	if err := emitter.processAnchor(); err != nil {
		return err
	}
	if err := emitter.processTag(); err != nil {
		return err
	}
	// Flow style when already inside flow, in canonical mode, when the
	// event requests it, or when the sequence is empty.
	if emitter.flow_level > 0 || emitter.canonical || event.SequenceStyle() == FLOW_SEQUENCE_STYLE ||
		emitter.checkEmptySequence() {
		emitter.state = EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE
	} else {
		emitter.state = EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE
	}
	return nil
}

// Expect MAPPING-START.
func (emitter *Emitter) emitMappingStart(event *Event) error {
	if err := emitter.processAnchor(); err != nil {
		return err
	}
	if err := emitter.processTag(); err != nil {
		return err
	}
	// Same flow/block choice as for sequences.
	if emitter.flow_level > 0 || emitter.canonical || event.MappingStyle() == FLOW_MAPPING_STYLE ||
		emitter.checkEmptyMapping() {
		emitter.state = EMIT_FLOW_MAPPING_FIRST_KEY_STATE
	} else {
		emitter.state = EMIT_BLOCK_MAPPING_FIRST_KEY_STATE
	}
	return nil
}

// Check if the document content is an empty scalar.
func (emitter *Emitter) checkEmptyDocument() bool {
	return false // [Go] Huh?
}

// Check if the next events represent an empty sequence.
func (emitter *Emitter) checkEmptySequence() bool {
	if len(emitter.events)-emitter.events_head < 2 {
		return false
	}
	return emitter.events[emitter.events_head].Type == SEQUENCE_START_EVENT &&
		emitter.events[emitter.events_head+1].Type == SEQUENCE_END_EVENT
}

// Check if the next events represent an empty mapping.
func (emitter *Emitter) checkEmptyMapping() bool {
	if len(emitter.events)-emitter.events_head < 2 {
		return false
	}
	return emitter.events[emitter.events_head].Type == MAPPING_START_EVENT &&
		emitter.events[emitter.events_head+1].Type == MAPPING_END_EVENT
}

// Check if the next node can be expressed as a simple key.
// checkSimpleKey reports whether the queued node is small enough to be
// written as a simple (inline) key; otherwise callers fall back to the
// explicit "?" key form.
func (emitter *Emitter) checkSimpleKey() bool {
	length := 0
	switch emitter.events[emitter.events_head].Type {
	case ALIAS_EVENT:
		length += len(emitter.anchor_data.anchor)
	case SCALAR_EVENT:
		// Multiline scalars can never be simple keys.
		if emitter.scalar_data.multiline {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix) +
			len(emitter.scalar_data.value)
	case SEQUENCE_START_EVENT:
		// Only an empty sequence can be a simple key.
		if !emitter.checkEmptySequence() {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	case MAPPING_START_EVENT:
		// Only an empty mapping can be a simple key.
		if !emitter.checkEmptyMapping() {
			return false
		}
		length += len(emitter.anchor_data.anchor) +
			len(emitter.tag_data.handle) +
			len(emitter.tag_data.suffix)
	default:
		return false
	}
	// Anything longer than 128 bytes is emitted as an explicit key.
	return length <= 128
}

// Determine an acceptable scalar style.
func (emitter *Emitter) selectScalarStyle(event *Event) error {
	no_tag := len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0
	if no_tag && !event.Implicit && !event.quoted_implicit {
		return errors.New("neither tag nor implicit flags are specified")
	}

	style := event.ScalarStyle()
	if style == ANY_SCALAR_STYLE {
		style = PLAIN_SCALAR_STYLE
	}
	if emitter.canonical {
		style = DOUBLE_QUOTED_SCALAR_STYLE
	}
	if emitter.simple_key_context && emitter.scalar_data.multiline {
		style = DOUBLE_QUOTED_SCALAR_STYLE
	}

	// Degrade the requested style to a safer one whenever the flags set by
	// analyzeScalar say the value cannot be represented in that style.
	if style == PLAIN_SCALAR_STYLE {
		if emitter.flow_level > 0 && !emitter.scalar_data.flow_plain_allowed ||
			emitter.flow_level == 0 && !emitter.scalar_data.block_plain_allowed {
			style = SINGLE_QUOTED_SCALAR_STYLE
		}
		if len(emitter.scalar_data.value) == 0 && (emitter.flow_level > 0 || emitter.simple_key_context) {
			style = SINGLE_QUOTED_SCALAR_STYLE
		}
		if no_tag && !event.Implicit {
			style = SINGLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == SINGLE_QUOTED_SCALAR_STYLE {
		if !emitter.scalar_data.single_quoted_allowed {
			style = DOUBLE_QUOTED_SCALAR_STYLE
		}
	}
	if style == LITERAL_SCALAR_STYLE || style == FOLDED_SCALAR_STYLE {
		if !emitter.scalar_data.block_allowed || emitter.flow_level > 0 || emitter.simple_key_context {
			style = DOUBLE_QUOTED_SCALAR_STYLE
		}
	}

	// Untagged non-plain scalars get the "!" non-specific tag handle.
	if no_tag && !event.quoted_implicit && style != PLAIN_SCALAR_STYLE {
		emitter.tag_data.handle = []byte{'!'}
	}
	emitter.scalar_data.style = style
	return nil
}

// Write an anchor.
func (emitter *Emitter) processAnchor() error {
	if emitter.anchor_data.anchor == nil {
		return nil
	}
	// "&" introduces an anchor, "*" an alias.
	c := []byte{'&'}
	if emitter.anchor_data.alias {
		c[0] = '*'
	}
	if err := emitter.writeIndicator(c, true, false, false); err != nil {
		return err
	}
	return emitter.writeAnchor(emitter.anchor_data.anchor)
}

// Write a tag.
func (emitter *Emitter) processTag() error {
	if len(emitter.tag_data.handle) == 0 && len(emitter.tag_data.suffix) == 0 {
		return nil
	}
	if len(emitter.tag_data.handle) > 0 {
		if err := emitter.writeTagHandle(emitter.tag_data.handle); err != nil {
			return err
		}
		if len(emitter.tag_data.suffix) > 0 {
			if err := emitter.writeTagContent(emitter.tag_data.suffix, false); err != nil {
				return err
			}
		}
	} else {
		// No handle: write the verbatim "!<...>" form.
		// [Go] Allocate these slices elsewhere.
		if err := emitter.writeIndicator([]byte("!<"), true, false, false); err != nil {
			return err
		}
		if err := emitter.writeTagContent(emitter.tag_data.suffix, false); err != nil {
			return err
		}
		if err := emitter.writeIndicator([]byte{'>'}, false, false, false); err != nil {
			return err
		}
	}
	return nil
}

// Write a scalar.
// processScalar writes the scalar value in the style previously chosen by
// selectScalarStyle.
func (emitter *Emitter) processScalar() error {
	switch emitter.scalar_data.style {
	case PLAIN_SCALAR_STYLE:
		return emitter.writePlainScalar(emitter.scalar_data.value, !emitter.simple_key_context)

	case SINGLE_QUOTED_SCALAR_STYLE:
		return emitter.writeSingleQuotedScalar(emitter.scalar_data.value, !emitter.simple_key_context)

	case DOUBLE_QUOTED_SCALAR_STYLE:
		return emitter.writeDoubleQuotedScalar(emitter.scalar_data.value, !emitter.simple_key_context)

	case LITERAL_SCALAR_STYLE:
		return emitter.writeLiteralScalar(emitter.scalar_data.value)

	case FOLDED_SCALAR_STYLE:
		return emitter.writeFoldedScalar(emitter.scalar_data.value)
	}
	// Unreachable once selectScalarStyle has run; a programmer error otherwise.
	panic("unknown scalar style")
}

// Write a head comment.
func (emitter *Emitter) processHeadComment() error {
	// Flush any pending tail comment first, recording its indent so the
	// next writeIndent inserts a separating blank line.
	if len(emitter.TailComment) > 0 {
		if err := emitter.writeIndent(); err != nil {
			return err
		}
		if err := emitter.writeComment(emitter.TailComment); err != nil {
			return err
		}
		emitter.TailComment = emitter.TailComment[:0]
		emitter.foot_indent = emitter.indent
		if emitter.foot_indent < 0 {
			emitter.foot_indent = 0
		}
	}

	if len(emitter.HeadComment) == 0 {
		return nil
	}
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if err := emitter.writeComment(emitter.HeadComment); err != nil {
		return err
	}
	emitter.HeadComment = emitter.HeadComment[:0]
	return nil
}

// Write a line comment.
func (emitter *Emitter) processLineCommentLinebreak(linebreak bool) error {
	if len(emitter.LineComment) == 0 {
		// The next 3 lines are needed to resolve an issue with leading newlines
		// See https://github.com/go-yaml/yaml/issues/755
		// When linebreak is set to true, put_break will be called and will add
		// the needed newline.
		if linebreak {
			if err := emitter.putLineBreak(); err != nil {
				return err
			}
		}
		return nil
	}
	// Separate the comment from preceding content with a space if needed.
	if !emitter.whitespace {
		if err := emitter.put(' '); err != nil {
			return err
		}
	}
	if err := emitter.writeComment(emitter.LineComment); err != nil {
		return err
	}
	emitter.LineComment = emitter.LineComment[:0]
	return nil
}

// Write a foot comment.
func (emitter *Emitter) processFootComment() error {
	if len(emitter.FootComment) == 0 {
		return nil
	}
	if err := emitter.writeIndent(); err != nil {
		return err
	}
	if err := emitter.writeComment(emitter.FootComment); err != nil {
		return err
	}
	emitter.FootComment = emitter.FootComment[:0]
	// Record the indent so the next writeIndent inserts a separating break.
	emitter.foot_indent = emitter.indent
	if emitter.foot_indent < 0 {
		emitter.foot_indent = 0
	}
	return nil
}

// Check if a %YAML directive is valid.
func (emitter *Emitter) analyzeVersionDirective(version_directive *VersionDirective) error {
	// Only YAML 1.1 is accepted by this emitter.
	if version_directive.major != 1 || version_directive.minor != 1 {
		return errors.New("incompatible %YAML directive")
	}
	return nil
}

// Check if a %TAG directive is valid.
func (emitter *Emitter) analyzeTagDirective(tag_directive *TagDirective) error {
	handle := tag_directive.handle
	prefix := tag_directive.prefix
	if len(handle) == 0 {
		return errors.New("tag handle must not be empty")
	}
	if handle[0] != '!' {
		return errors.New("tag handle must start with '!'")
	}
	if handle[len(handle)-1] != '!' {
		return errors.New("tag handle must end with '!'")
	}
	// Characters between the surrounding '!'s must be alphanumeric.
	for i := 1; i < len(handle)-1; i += width(handle[i]) {
		if !isAlpha(handle, i) {
			return errors.New("tag handle must contain alphanumerical characters only")
		}
	}
	if len(prefix) == 0 {
		return errors.New("tag prefix must not be empty")
	}
	return nil
}

// Check if an anchor is valid.
// analyzeAnchor validates anchor/alias characters and records the result in
// emitter.anchor_data.
func (emitter *Emitter) analyzeAnchor(anchor []byte, alias bool) error {
	if len(anchor) == 0 {
		if alias {
			return errors.New("alias value must not be empty")
		} else {
			return errors.New("anchor value must not be empty")
		}
	}
	for i := 0; i < len(anchor); i += width(anchor[i]) {
		if !isAnchorChar(anchor, i) {
			if alias {
				return errors.New("alias value must contain valid characters only")
			} else {
				return errors.New("anchor value must contain valid characters only")
			}
		}
	}
	emitter.anchor_data.anchor = anchor
	emitter.anchor_data.alias = alias
	return nil
}

// analyzeTag splits a tag into handle+suffix using the registered tag
// directives, or records the whole tag as the suffix when no prefix matches.
func (emitter *Emitter) analyzeTag(tag []byte) error {
	if len(tag) == 0 {
		return errors.New("tag value must not be empty")
	}
	for i := 0; i < len(emitter.tag_directives); i++ {
		tag_directive := &emitter.tag_directives[i]
		if bytes.HasPrefix(tag, tag_directive.prefix) {
			emitter.tag_data.handle = tag_directive.handle
			emitter.tag_data.suffix = tag[len(tag_directive.prefix):]
			return nil
		}
	}
	emitter.tag_data.suffix = tag
	return nil
}

// analyzeScalar scans the value once and records, in emitter.scalar_data,
// which output styles can represent it faithfully.
func (emitter *Emitter) analyzeScalar(value []byte) error {
	var block_indicators,
		flow_indicators,
		line_breaks,
		special_characters,
		tab_characters,

		leading_space,
		leading_break,
		trailing_space,
		trailing_break,
		break_space,
		space_break,

		preceded_by_whitespace,
		followed_by_whitespace,
		previous_space,
		previous_break bool

	emitter.scalar_data.value = value

	// Empty scalar: plain is allowed only in block context, and block
	// (literal/folded) styles are never allowed.
	if len(value) == 0 {
		emitter.scalar_data.multiline = false
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = true
		emitter.scalar_data.single_quoted_allowed = true
		emitter.scalar_data.block_allowed = false
		return nil
	}

	// A value starting with "---" or "..." could be confused with document
	// markers, so plain style is ruled out below via the indicator flags.
	if len(value) >= 3 && ((value[0] == '-' && value[1] == '-' && value[2] == '-') || (value[0] == '.' && value[1] == '.' && value[2] == '.')) {
		block_indicators = true
		flow_indicators = true
	}

	preceded_by_whitespace = true
	for i, w := 0, 0; i < len(value); i += w {
		w = width(value[i])
		followed_by_whitespace = i+w >= len(value) || isBlank(value, i+w)

		if i == 0 {
			// Indicator characters at the start of the value.
			switch value[i] {
			case '#', ',', '[', ']', '{', '}', '&', '*', '!', '|', '>', '\'', '"', '%', '@', '`':
				flow_indicators = true
				block_indicators = true
			case '?', ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '-':
				if followed_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		} else {
			// Indicator characters in the middle of the value.
			switch value[i] {
			case ',', '?', '[', ']', '{', '}':
				flow_indicators = true
			case ':':
				flow_indicators = true
				if followed_by_whitespace {
					block_indicators = true
				}
			case '#':
				if preceded_by_whitespace {
					flow_indicators = true
					block_indicators = true
				}
			}
		}

		if value[i] == '\t' {
			tab_characters = true
		} else if !isPrintable(value, i) || !isASCII(value, i) && !emitter.unicode {
			special_characters = true
		}
		if isSpace(value, i) {
			if i == 0 {
				leading_space = true
			}
			if i+width(value[i]) == len(value) {
				trailing_space = true
			}
			if previous_break {
				break_space = true
			}
			previous_space = true
			previous_break = false
		} else if isLineBreak(value, i) {
			line_breaks = true
			if i == 0 {
				leading_break = true
			}
			if i+width(value[i]) == len(value) {
				trailing_break = true
			}
			if previous_space {
				space_break = true
			}
			previous_space = false
			previous_break = true
		} else {
			previous_space = false
			previous_break = false
		}

		// [Go]: Why 'z'? Couldn't be the end of the string as that's the loop condition.
		preceded_by_whitespace = isBlankOrZero(value, i)
	}

	emitter.scalar_data.multiline = line_breaks
	// Start permissive and strike styles out as the gathered flags demand.
	emitter.scalar_data.flow_plain_allowed = true
	emitter.scalar_data.block_plain_allowed = true
	emitter.scalar_data.single_quoted_allowed = true
	emitter.scalar_data.block_allowed = true

	if leading_space || leading_break || trailing_space || trailing_break {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if trailing_space {
		emitter.scalar_data.block_allowed = false
	}
	if break_space {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
	}
	if space_break || tab_characters || special_characters {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
		emitter.scalar_data.single_quoted_allowed = false
	}
	if space_break || special_characters {
		emitter.scalar_data.block_allowed = false
	}
	if line_breaks {
		emitter.scalar_data.flow_plain_allowed = false
		emitter.scalar_data.block_plain_allowed = false
	}
	if flow_indicators {
		emitter.scalar_data.flow_plain_allowed = false
	}
	if block_indicators {
		emitter.scalar_data.block_plain_allowed = false
	}
	return nil
}

// Check if the event data is valid.
// analyzeEvent resets the per-event analysis data, captures the event's
// comments onto the emitter, and analyzes anchor/tag/scalar data as
// applicable for the event type.
func (emitter *Emitter) analyzeEvent(event *Event) error {
	emitter.anchor_data.anchor = nil
	emitter.tag_data.handle = nil
	emitter.tag_data.suffix = nil
	emitter.scalar_data.value = nil

	if len(event.HeadComment) > 0 {
		emitter.HeadComment = event.HeadComment
	}
	if len(event.LineComment) > 0 {
		emitter.LineComment = event.LineComment
	}
	if len(event.FootComment) > 0 {
		emitter.FootComment = event.FootComment
	}
	if len(event.TailComment) > 0 {
		emitter.TailComment = event.TailComment
	}

	switch event.Type {
	case ALIAS_EVENT:
		if err := emitter.analyzeAnchor(event.Anchor, true); err != nil {
			return err
		}

	case SCALAR_EVENT:
		if len(event.Anchor) > 0 {
			if err := emitter.analyzeAnchor(event.Anchor, false); err != nil {
				return err
			}
		}
		// NOTE(review): the tag is analyzed only when the implicit flags do
		// not suppress it (or in canonical mode) — presumably because only
		// then is it written; confirm against processTag/selectScalarStyle.
		if len(event.Tag) > 0 && (emitter.canonical || (!event.Implicit && !event.quoted_implicit)) {
			if err := emitter.analyzeTag(event.Tag); err != nil {
				return err
			}
		}
		if err := emitter.analyzeScalar(event.Value); err != nil {
			return err
		}

	case SEQUENCE_START_EVENT:
		if len(event.Anchor) > 0 {
			if err := emitter.analyzeAnchor(event.Anchor, false); err != nil {
				return err
			}
		}
		if len(event.Tag) > 0 && (emitter.canonical || !event.Implicit) {
			if err := emitter.analyzeTag(event.Tag); err != nil {
				return err
			}
		}

	case MAPPING_START_EVENT:
		if len(event.Anchor) > 0 {
			if err := emitter.analyzeAnchor(event.Anchor, false); err != nil {
				return err
			}
		}
		if len(event.Tag) > 0 && (emitter.canonical || !event.Implicit) {
			if err := emitter.analyzeTag(event.Tag); err != nil {
				return err
			}
		}
	}
	return nil
}

// Write the BOM character.
+func (emitter *Emitter) writeBom() error { + if err := emitter.flushIfNeeded(); err != nil { + return err + } + pos := emitter.buffer_pos + emitter.buffer[pos+0] = '\xEF' + emitter.buffer[pos+1] = '\xBB' + emitter.buffer[pos+2] = '\xBF' + emitter.buffer_pos += 3 + return nil +} + +func (emitter *Emitter) writeIndent() error { + indent := emitter.indent + if indent < 0 { + indent = 0 + } + if !emitter.indention || emitter.column > indent || (emitter.column == indent && !emitter.whitespace) { + if err := emitter.putLineBreak(); err != nil { + return err + } + } + if emitter.foot_indent == indent { + if err := emitter.putLineBreak(); err != nil { + return err + } + } + for emitter.column < indent { + if err := emitter.put(' '); err != nil { + return err + } + } + emitter.whitespace = true + // emitter.indention = true + emitter.space_above = false + emitter.foot_indent = -1 + return nil +} + +func (emitter *Emitter) writeIndicator(indicator []byte, need_whitespace, is_whitespace, is_indention bool) error { + if need_whitespace && !emitter.whitespace { + if err := emitter.put(' '); err != nil { + return err + } + } + if err := emitter.writeAll(indicator); err != nil { + return err + } + emitter.whitespace = is_whitespace + emitter.indention = (emitter.indention && is_indention) + emitter.OpenEnded = false + return nil +} + +func (emitter *Emitter) writeAnchor(value []byte) error { + if err := emitter.writeAll(value); err != nil { + return err + } + emitter.whitespace = false + emitter.indention = false + return nil +} + +func (emitter *Emitter) writeTagHandle(value []byte) error { + if !emitter.whitespace { + if err := emitter.put(' '); err != nil { + return err + } + } + if err := emitter.writeAll(value); err != nil { + return err + } + emitter.whitespace = false + emitter.indention = false + return nil +} + +func (emitter *Emitter) writeTagContent(value []byte, need_whitespace bool) error { + if need_whitespace && !emitter.whitespace { + if err := emitter.put(' '); 
err != nil { + return err + } + } + for i := 0; i < len(value); { + var must_write bool + switch value[i] { + case ';', '/', '?', ':', '@', '&', '=', '+', '$', ',', '_', '.', '~', '*', '\'', '(', ')', '[', ']': + must_write = true + default: + must_write = isAlpha(value, i) + } + if must_write { + if err := emitter.write(value, &i); err != nil { + return err + } + } else { + w := width(value[i]) + for k := 0; k < w; k++ { + octet := value[i] + i++ + if err := emitter.put('%'); err != nil { + return err + } + + c := octet >> 4 + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if err := emitter.put(c); err != nil { + return err + } + + c = octet & 0x0f + if c < 10 { + c += '0' + } else { + c += 'A' - 10 + } + if err := emitter.put(c); err != nil { + return err + } + } + } + } + emitter.whitespace = false + emitter.indention = false + return nil +} + +func (emitter *Emitter) writePlainScalar(value []byte, allow_breaks bool) error { + if len(value) > 0 && !emitter.whitespace { + if err := emitter.put(' '); err != nil { + return err + } + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if isSpace(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && !isSpace(value, i+1) { + if err := emitter.writeIndent(); err != nil { + return err + } + i += width(value[i]) + } else { + if err := emitter.write(value, &i); err != nil { + return err + } + } + spaces = true + } else if isLineBreak(value, i) { + if !breaks && value[i] == '\n' { + if err := emitter.putLineBreak(); err != nil { + return err + } + } + if err := emitter.writeLineBreak(value, &i); err != nil { + return err + } + // emitter.indention = true + breaks = true + } else { + if breaks { + if err := emitter.writeIndent(); err != nil { + return err + } + } + if err := emitter.write(value, &i); err != nil { + return err + } + emitter.indention = false + spaces = false + breaks = false + } + } + + if len(value) > 0 { + emitter.whitespace = false + } + 
emitter.indention = false + if emitter.root_context { + emitter.OpenEnded = true + } + + return nil +} + +func (emitter *Emitter) writeSingleQuotedScalar(value []byte, allow_breaks bool) error { + if err := emitter.writeIndicator([]byte{'\''}, true, false, false); err != nil { + return err + } + + spaces := false + breaks := false + for i := 0; i < len(value); { + if isSpace(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 && !isSpace(value, i+1) { + if err := emitter.writeIndent(); err != nil { + return err + } + i += width(value[i]) + } else { + if err := emitter.write(value, &i); err != nil { + return err + } + } + spaces = true + } else if isLineBreak(value, i) { + if !breaks && value[i] == '\n' { + if err := emitter.putLineBreak(); err != nil { + return err + } + } + if err := emitter.writeLineBreak(value, &i); err != nil { + return err + } + // emitter.indention = true + breaks = true + } else { + if breaks { + if err := emitter.writeIndent(); err != nil { + return err + } + } + if value[i] == '\'' { + if err := emitter.put('\''); err != nil { + return err + } + } + if err := emitter.write(value, &i); err != nil { + return err + } + emitter.indention = false + spaces = false + breaks = false + } + } + if err := emitter.writeIndicator([]byte{'\''}, false, false, false); err != nil { + return err + } + emitter.whitespace = false + emitter.indention = false + return nil +} + +func (emitter *Emitter) writeDoubleQuotedScalar(value []byte, allow_breaks bool) error { + spaces := false + if err := emitter.writeIndicator([]byte{'"'}, true, false, false); err != nil { + return err + } + + for i := 0; i < len(value); { + if !isPrintable(value, i) || (!emitter.unicode && !isASCII(value, i)) || + isBOM(value, i) || isLineBreak(value, i) || + value[i] == '"' || value[i] == '\\' { + + octet := value[i] + + var w int + var v rune + switch { + case octet&0x80 == 0x00: + w, v = 1, rune(octet&0x7F) + case octet&0xE0 == 
0xC0: + w, v = 2, rune(octet&0x1F) + case octet&0xF0 == 0xE0: + w, v = 3, rune(octet&0x0F) + case octet&0xF8 == 0xF0: + w, v = 4, rune(octet&0x07) + } + for k := 1; k < w; k++ { + octet = value[i+k] + v = (v << 6) + (rune(octet) & 0x3F) + } + i += w + + if err := emitter.put('\\'); err != nil { + return err + } + + var err error + switch v { + case 0x00: + err = emitter.put('0') + case 0x07: + err = emitter.put('a') + case 0x08: + err = emitter.put('b') + case 0x09: + err = emitter.put('t') + case 0x0A: + err = emitter.put('n') + case 0x0b: + err = emitter.put('v') + case 0x0c: + err = emitter.put('f') + case 0x0d: + err = emitter.put('r') + case 0x1b: + err = emitter.put('e') + case 0x22: + err = emitter.put('"') + case 0x5c: + err = emitter.put('\\') + case 0x85: + err = emitter.put('N') + case 0xA0: + err = emitter.put('_') + case 0x2028: + err = emitter.put('L') + case 0x2029: + err = emitter.put('P') + default: + if v <= 0xFF { + err = emitter.put('x') + w = 2 + } else if v <= 0xFFFF { + err = emitter.put('u') + w = 4 + } else { + err = emitter.put('U') + w = 8 + } + for k := (w - 1) * 4; err == nil && k >= 0; k -= 4 { + digit := byte((v >> uint(k)) & 0x0F) + if digit < 10 { + err = emitter.put(digit + '0') + } else { + err = emitter.put(digit + 'A' - 10) + } + } + } + if err != nil { + return err + } + spaces = false + } else if isSpace(value, i) { + if allow_breaks && !spaces && emitter.column > emitter.best_width && i > 0 && i < len(value)-1 { + if err := emitter.writeIndent(); err != nil { + return err + } + if isSpace(value, i+1) { + if err := emitter.put('\\'); err != nil { + return err + } + } + i += width(value[i]) + } else if err := emitter.write(value, &i); err != nil { + return err + } + spaces = true + } else { + if err := emitter.write(value, &i); err != nil { + return err + } + spaces = false + } + } + if err := emitter.writeIndicator([]byte{'"'}, false, false, false); err != nil { + return err + } + emitter.whitespace = false + emitter.indention 
= false + return nil +} + +func (emitter *Emitter) writeBlockScalarHints(value []byte) error { + if isSpace(value, 0) || isLineBreak(value, 0) { + indent_hint := []byte{'0' + byte(emitter.BestIndent)} + if err := emitter.writeIndicator(indent_hint, false, false, false); err != nil { + return err + } + } + + emitter.OpenEnded = false + + var chomp_hint [1]byte + if len(value) == 0 { + chomp_hint[0] = '-' + } else { + i := len(value) - 1 + for value[i]&0xC0 == 0x80 { + i-- + } + if !isLineBreak(value, i) { + chomp_hint[0] = '-' + } else if i == 0 { + chomp_hint[0] = '+' + emitter.OpenEnded = true + } else { + i-- + for value[i]&0xC0 == 0x80 { + i-- + } + if isLineBreak(value, i) { + chomp_hint[0] = '+' + emitter.OpenEnded = true + } + } + } + if chomp_hint[0] != 0 { + if err := emitter.writeIndicator(chomp_hint[:], false, false, false); err != nil { + return err + } + } + return nil +} + +func (emitter *Emitter) writeLiteralScalar(value []byte) error { + if err := emitter.writeIndicator([]byte{'|'}, true, false, false); err != nil { + return err + } + if err := emitter.writeBlockScalarHints(value); err != nil { + return err + } + if err := emitter.processLineCommentLinebreak(true); err != nil { + return err + } + // emitter.indention = true + emitter.whitespace = true + breaks := true + for i := 0; i < len(value); { + if isLineBreak(value, i) { + if err := emitter.writeLineBreak(value, &i); err != nil { + return err + } + // emitter.indention = true + breaks = true + } else { + if breaks { + if err := emitter.writeIndent(); err != nil { + return err + } + } + if err := emitter.write(value, &i); err != nil { + return err + } + emitter.indention = false + breaks = false + } + } + + return nil +} + +func (emitter *Emitter) writeFoldedScalar(value []byte) error { + if err := emitter.writeIndicator([]byte{'>'}, true, false, false); err != nil { + return err + } + if err := emitter.writeBlockScalarHints(value); err != nil { + return err + } + if err := 
emitter.processLineCommentLinebreak(true); err != nil { + return err + } + + // emitter.indention = true + emitter.whitespace = true + + breaks := true + leading_spaces := true + for i := 0; i < len(value); { + if isLineBreak(value, i) { + if !breaks && !leading_spaces && value[i] == '\n' { + k := 0 + for isLineBreak(value, k) { + k += width(value[k]) + } + if !isBlankOrZero(value, k) { + if err := emitter.putLineBreak(); err != nil { + return err + } + } + } + if err := emitter.writeLineBreak(value, &i); err != nil { + return err + } + // emitter.indention = true + breaks = true + } else { + if breaks { + if err := emitter.writeIndent(); err != nil { + return err + } + leading_spaces = isBlank(value, i) + } + if !breaks && isSpace(value, i) && !isSpace(value, i+1) && emitter.column > emitter.best_width { + if err := emitter.writeIndent(); err != nil { + return err + } + i += width(value[i]) + } else { + if err := emitter.write(value, &i); err != nil { + return err + } + } + emitter.indention = false + breaks = false + } + } + return nil +} + +func (emitter *Emitter) writeComment(comment []byte) error { + breaks := false + pound := false + for i := 0; i < len(comment); { + if isLineBreak(comment, i) { + if err := emitter.writeLineBreak(comment, &i); err != nil { + return err + } + // emitter.indention = true + breaks = true + pound = false + } else { + if breaks { + if err := emitter.writeIndent(); err != nil { + return err + } + } + if !pound { + if comment[i] != '#' { + if err := emitter.put('#'); err != nil { + return err + } + if err := emitter.put(' '); err != nil { + return err + } + } + pound = true + } + if err := emitter.write(comment, &i); err != nil { + return err + } + emitter.indention = false + breaks = false + } + } + if !breaks { + if err := emitter.putLineBreak(); err != nil { + return err + } + } + + emitter.whitespace = true + // emitter.indention = true + return nil +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/parser.go 
b/vendor/go.yaml.in/yaml/v4/internal/libyaml/parser.go new file mode 100644 index 00000000..765ce333 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/parser.go @@ -0,0 +1,1267 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package libyaml + +import ( + "bytes" +) + +// The parser implements the following grammar: +// +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// implicit_document ::= block_node DOCUMENT-END* +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// block_node_or_indentless_sequence ::= +// ALIAS +// | properties (block_content | indentless_block_sequence)? +// | block_content +// | indentless_block_sequence +// block_node ::= ALIAS +// | properties block_content? +// | block_content +// flow_node ::= ALIAS +// | properties flow_content? 
+// | flow_content +// properties ::= TAG ANCHOR? | ANCHOR TAG? +// block_content ::= block_collection | flow_collection | SCALAR +// flow_content ::= flow_collection | SCALAR +// block_collection ::= block_sequence | block_mapping +// flow_collection ::= flow_sequence | flow_mapping +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// block_mapping ::= BLOCK-MAPPING_START +// ((KEY block_node_or_indentless_sequence?)? +// (VALUE block_node_or_indentless_sequence?)?)* +// BLOCK-END +// flow_sequence ::= FLOW-SEQUENCE-START +// (flow_sequence_entry FLOW-ENTRY)* +// flow_sequence_entry? +// FLOW-SEQUENCE-END +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// flow_mapping ::= FLOW-MAPPING-START +// (flow_mapping_entry FLOW-ENTRY)* +// flow_mapping_entry? +// FLOW-MAPPING-END +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? + +// Peek the next token in the token queue. +func (parser *Parser) peekToken() *Token { + if parser.token_available || parser.fetchMoreTokens() { + token := &parser.tokens[parser.tokens_head] + parser.UnfoldComments(token) + return token + } + return nil +} + +// UnfoldComments walks through the comments queue and joins all +// comments behind the position of the provided token into the respective +// top-level comment slices in the parser. +func (parser *Parser) UnfoldComments(token *Token) { + for parser.comments_head < len(parser.comments) && token.StartMark.Index >= parser.comments[parser.comments_head].token_mark.Index { + comment := &parser.comments[parser.comments_head] + if len(comment.head) > 0 { + if token.Type == BLOCK_END_TOKEN { + // No heads on ends, so keep comment.head for a follow up token. + break + } + if len(parser.HeadComment) > 0 { + parser.HeadComment = append(parser.HeadComment, '\n') + } + parser.HeadComment = append(parser.HeadComment, comment.head...) 
+ } + if len(comment.foot) > 0 { + if len(parser.FootComment) > 0 { + parser.FootComment = append(parser.FootComment, '\n') + } + parser.FootComment = append(parser.FootComment, comment.foot...) + } + if len(comment.line) > 0 { + if len(parser.LineComment) > 0 { + parser.LineComment = append(parser.LineComment, '\n') + } + parser.LineComment = append(parser.LineComment, comment.line...) + } + *comment = Comment{} + parser.comments_head++ + } +} + +// Remove the next token from the queue (must be called after peek_token). +func (parser *Parser) skipToken() { + parser.token_available = false + parser.tokens_parsed++ + parser.stream_end_produced = parser.tokens[parser.tokens_head].Type == STREAM_END_TOKEN + parser.tokens_head++ +} + +// Parse gets the next event. +func (parser *Parser) Parse(event *Event) bool { + // Erase the event object. + *event = Event{} + + // No events after the end of the stream or error. + if parser.stream_end_produced || parser.ErrorType != NO_ERROR || parser.state == PARSE_END_STATE { + return true + } + + // Generate the next event. + return parser.stateMachine(event) +} + +// Set parser error. +func (parser *Parser) setParserError(problem string, problem_mark Mark) bool { + parser.ErrorType = PARSER_ERROR + parser.Problem = problem + parser.ProblemMark = problem_mark + return false +} + +func (parser *Parser) setParserErrorContext(context string, context_mark Mark, problem string, problem_mark Mark) bool { + parser.ErrorType = PARSER_ERROR + parser.Context = context + parser.ContextMark = context_mark + parser.Problem = problem + parser.ProblemMark = problem_mark + return false +} + +// State dispatcher. 
+func (parser *Parser) stateMachine(event *Event) bool { + // trace("yaml_parser_state_machine", "state:", parser.state.String()) + + switch parser.state { + case PARSE_STREAM_START_STATE: + return parser.parseStreamStart(event) + + case PARSE_IMPLICIT_DOCUMENT_START_STATE: + return parser.parseDocumentStart(event, true) + + case PARSE_DOCUMENT_START_STATE: + return parser.parseDocumentStart(event, false) + + case PARSE_DOCUMENT_CONTENT_STATE: + return parser.parseDocumentContent(event) + + case PARSE_DOCUMENT_END_STATE: + return parser.parseDocumentEnd(event) + + case PARSE_BLOCK_NODE_STATE: + return parser.parseNode(event, true, false) + + case PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return parser.parseBlockSequenceEntry(event, true) + + case PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return parser.parseBlockSequenceEntry(event, false) + + case PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return parser.parseIndentlessSequenceEntry(event) + + case PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return parser.parseBlockMappingKey(event, true) + + case PARSE_BLOCK_MAPPING_KEY_STATE: + return parser.parseBlockMappingKey(event, false) + + case PARSE_BLOCK_MAPPING_VALUE_STATE: + return parser.parseBlockMappingValue(event) + + case PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return parser.parseFlowSequenceEntry(event, true) + + case PARSE_FLOW_SEQUENCE_ENTRY_STATE: + return parser.parseFlowSequenceEntry(event, false) + + case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return parser.parseFlowSequenceEntryMappingKey(event) + + case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return parser.parseFlowSequenceEntryMappingValue(event) + + case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return parser.parseFlowSequenceEntryMappingEnd(event) + + case PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return parser.parseFlowMappingKey(event, true) + + case PARSE_FLOW_MAPPING_KEY_STATE: + return parser.parseFlowMappingKey(event, false) + + case PARSE_FLOW_MAPPING_VALUE_STATE: + return 
parser.parseFlowMappingValue(event, false) + + case PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return parser.parseFlowMappingValue(event, true) + + default: + panic("invalid parser state") + } +} + +// Parse the production: +// stream ::= STREAM-START implicit_document? explicit_document* STREAM-END +// +// ************ +func (parser *Parser) parseStreamStart(event *Event) bool { + token := parser.peekToken() + if token == nil { + return false + } + if token.Type != STREAM_START_TOKEN { + return parser.setParserError("did not find expected ", token.StartMark) + } + parser.state = PARSE_IMPLICIT_DOCUMENT_START_STATE + *event = Event{ + Type: STREAM_START_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + encoding: token.encoding, + } + parser.skipToken() + return true +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// * +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// ************************* +func (parser *Parser) parseDocumentStart(event *Event, implicit bool) bool { + token := parser.peekToken() + if token == nil { + return false + } + + // Parse extra document end indicators. + if !implicit { + for token.Type == DOCUMENT_END_TOKEN { + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + } + } + + if implicit && token.Type != VERSION_DIRECTIVE_TOKEN && + token.Type != TAG_DIRECTIVE_TOKEN && + token.Type != DOCUMENT_START_TOKEN && + token.Type != STREAM_END_TOKEN { + // Parse an implicit document. 
+ if !parser.processDirectives(nil, nil) { + return false + } + parser.states = append(parser.states, PARSE_DOCUMENT_END_STATE) + parser.state = PARSE_BLOCK_NODE_STATE + + var head_comment []byte + if len(parser.HeadComment) > 0 { + // [Go] Scan the header comment backwards, and if an empty line is found, break + // the header so the part before the last empty line goes into the + // document header, while the bottom of it goes into a follow up event. + for i := len(parser.HeadComment) - 1; i > 0; i-- { + if parser.HeadComment[i] == '\n' { + if i == len(parser.HeadComment)-1 { + head_comment = parser.HeadComment[:i] + parser.HeadComment = parser.HeadComment[i+1:] + break + } else if parser.HeadComment[i-1] == '\n' { + head_comment = parser.HeadComment[:i-1] + parser.HeadComment = parser.HeadComment[i+1:] + break + } + } + } + } + + *event = Event{ + Type: DOCUMENT_START_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + Implicit: true, + + HeadComment: head_comment, + } + + } else if token.Type != STREAM_END_TOKEN { + // Parse an explicit document. + var version_directive *VersionDirective + var tag_directives []TagDirective + start_mark := token.StartMark + if !parser.processDirectives(&version_directive, &tag_directives) { + return false + } + token = parser.peekToken() + if token == nil { + return false + } + if token.Type != DOCUMENT_START_TOKEN { + parser.setParserError( + "did not find expected ", token.StartMark) + return false + } + parser.states = append(parser.states, PARSE_DOCUMENT_END_STATE) + parser.state = PARSE_DOCUMENT_CONTENT_STATE + end_mark := token.EndMark + + *event = Event{ + Type: DOCUMENT_START_EVENT, + StartMark: start_mark, + EndMark: end_mark, + version_directive: version_directive, + tag_directives: tag_directives, + Implicit: false, + } + parser.skipToken() + + } else { + // Parse the stream end. 
+ parser.state = PARSE_END_STATE + *event = Event{ + Type: STREAM_END_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + } + parser.skipToken() + } + + return true +} + +// Parse the productions: +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END* +// +// *********** +func (parser *Parser) parseDocumentContent(event *Event) bool { + token := parser.peekToken() + if token == nil { + return false + } + + if token.Type == VERSION_DIRECTIVE_TOKEN || + token.Type == TAG_DIRECTIVE_TOKEN || + token.Type == DOCUMENT_START_TOKEN || + token.Type == DOCUMENT_END_TOKEN || + token.Type == STREAM_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + return parser.processEmptyScalar(event, + token.StartMark) + } + return parser.parseNode(event, true, false) +} + +// Parse the productions: +// implicit_document ::= block_node DOCUMENT-END* +// +// ************* +// +// explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? 
DOCUMENT-END* +func (parser *Parser) parseDocumentEnd(event *Event) bool { + token := parser.peekToken() + if token == nil { + return false + } + + start_mark := token.StartMark + end_mark := token.StartMark + + implicit := true + if token.Type == DOCUMENT_END_TOKEN { + end_mark = token.EndMark + parser.skipToken() + implicit = false + } + + parser.tag_directives = parser.tag_directives[:0] + + parser.state = PARSE_DOCUMENT_START_STATE + *event = Event{ + Type: DOCUMENT_END_EVENT, + StartMark: start_mark, + EndMark: end_mark, + Implicit: implicit, + } + parser.setEventComments(event) + if len(event.HeadComment) > 0 && len(event.FootComment) == 0 { + event.FootComment = event.HeadComment + event.HeadComment = nil + } + return true +} + +func (parser *Parser) setEventComments(event *Event) { + event.HeadComment = parser.HeadComment + event.LineComment = parser.LineComment + event.FootComment = parser.FootComment + parser.HeadComment = nil + parser.LineComment = nil + parser.FootComment = nil + parser.tail_comment = nil + parser.stem_comment = nil +} + +// Parse the productions: +// block_node_or_indentless_sequence ::= +// +// ALIAS +// ***** +// | properties (block_content | indentless_block_sequence)? +// ********** * +// | block_content | indentless_block_sequence +// * +// +// block_node ::= ALIAS +// +// ***** +// | properties block_content? +// ********** * +// | block_content +// * +// +// flow_node ::= ALIAS +// +// ***** +// | properties flow_content? +// ********** * +// | flow_content +// * +// +// properties ::= TAG ANCHOR? | ANCHOR TAG? 
+// +// ************************* +// +// block_content ::= block_collection | flow_collection | SCALAR +// +// ****** +// +// flow_content ::= flow_collection | SCALAR +// +// ****** +func (parser *Parser) parseNode(event *Event, block, indentless_sequence bool) bool { + // defer trace("yaml_parser_parse_node", "block:", block, "indentless_sequence:", indentless_sequence)() + + token := parser.peekToken() + if token == nil { + return false + } + + if token.Type == ALIAS_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + *event = Event{ + Type: ALIAS_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + Anchor: token.Value, + } + parser.setEventComments(event) + parser.skipToken() + return true + } + + start_mark := token.StartMark + end_mark := token.StartMark + + var tag_token bool + var tag_handle, tag_suffix, anchor []byte + var tag_mark Mark + switch token.Type { + case ANCHOR_TOKEN: + anchor = token.Value + start_mark = token.StartMark + end_mark = token.EndMark + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + if token.Type == TAG_TOKEN { + tag_token = true + tag_handle = token.Value + tag_suffix = token.suffix + tag_mark = token.StartMark + end_mark = token.EndMark + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + } + case TAG_TOKEN: + tag_token = true + tag_handle = token.Value + tag_suffix = token.suffix + start_mark = token.StartMark + tag_mark = token.StartMark + end_mark = token.EndMark + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + if token.Type == ANCHOR_TOKEN { + anchor = token.Value + end_mark = token.EndMark + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + } + } + + var tag []byte + if tag_token { + if len(tag_handle) == 0 { + tag = tag_suffix + } else { + for i := range parser.tag_directives { + if 
bytes.Equal(parser.tag_directives[i].handle, tag_handle) { + tag = append([]byte(nil), parser.tag_directives[i].prefix...) + tag = append(tag, tag_suffix...) + break + } + } + if len(tag) == 0 { + parser.setParserErrorContext( + "while parsing a node", start_mark, + "found undefined tag handle", tag_mark) + return false + } + } + } + + implicit := len(tag) == 0 + if indentless_sequence && token.Type == BLOCK_ENTRY_TOKEN { + end_mark = token.EndMark + parser.state = PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + *event = Event{ + Type: SEQUENCE_START_EVENT, + StartMark: start_mark, + EndMark: end_mark, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + Style: Style(BLOCK_SEQUENCE_STYLE), + } + return true + } + if token.Type == SCALAR_TOKEN { + var plain_implicit, quoted_implicit bool + end_mark = token.EndMark + if (len(tag) == 0 && token.Style == PLAIN_SCALAR_STYLE) || (len(tag) == 1 && tag[0] == '!') { + plain_implicit = true + } else if len(tag) == 0 { + quoted_implicit = true + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = Event{ + Type: SCALAR_EVENT, + StartMark: start_mark, + EndMark: end_mark, + Anchor: anchor, + Tag: tag, + Value: token.Value, + Implicit: plain_implicit, + quoted_implicit: quoted_implicit, + Style: Style(token.Style), + } + parser.setEventComments(event) + parser.skipToken() + return true + } + if token.Type == FLOW_SEQUENCE_START_TOKEN { + // [Go] Some of the events below can be merged as they differ only on style. 
+ end_mark = token.EndMark + parser.state = PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE + *event = Event{ + Type: SEQUENCE_START_EVENT, + StartMark: start_mark, + EndMark: end_mark, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + Style: Style(FLOW_SEQUENCE_STYLE), + } + parser.setEventComments(event) + return true + } + if token.Type == FLOW_MAPPING_START_TOKEN { + end_mark = token.EndMark + parser.state = PARSE_FLOW_MAPPING_FIRST_KEY_STATE + *event = Event{ + Type: MAPPING_START_EVENT, + StartMark: start_mark, + EndMark: end_mark, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + Style: Style(FLOW_MAPPING_STYLE), + } + parser.setEventComments(event) + return true + } + if block && token.Type == BLOCK_SEQUENCE_START_TOKEN { + end_mark = token.EndMark + parser.state = PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE + *event = Event{ + Type: SEQUENCE_START_EVENT, + StartMark: start_mark, + EndMark: end_mark, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + Style: Style(BLOCK_SEQUENCE_STYLE), + } + if parser.stem_comment != nil { + event.HeadComment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if block && token.Type == BLOCK_MAPPING_START_TOKEN { + end_mark = token.EndMark + parser.state = PARSE_BLOCK_MAPPING_FIRST_KEY_STATE + *event = Event{ + Type: MAPPING_START_EVENT, + StartMark: start_mark, + EndMark: end_mark, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + Style: Style(BLOCK_MAPPING_STYLE), + } + if parser.stem_comment != nil { + event.HeadComment = parser.stem_comment + parser.stem_comment = nil + } + return true + } + if len(anchor) > 0 || len(tag) > 0 { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = Event{ + Type: SCALAR_EVENT, + StartMark: start_mark, + EndMark: end_mark, + Anchor: anchor, + Tag: tag, + Implicit: implicit, + quoted_implicit: false, + Style: Style(PLAIN_SCALAR_STYLE), + } + return true + } + + context := "while parsing a flow node" + 
if block { + context = "while parsing a block node" + } + parser.setParserErrorContext(context, start_mark, + "did not find expected node content", token.StartMark) + return false +} + +// Parse the productions: +// block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END +// +// ******************** *********** * ********* +func (parser *Parser) parseBlockSequenceEntry(event *Event, first bool) bool { + if first { + token := parser.peekToken() + if token == nil { + return false + } + parser.marks = append(parser.marks, token.StartMark) + parser.skipToken() + } + + token := parser.peekToken() + if token == nil { + return false + } + + if token.Type == BLOCK_ENTRY_TOKEN { + mark := token.EndMark + prior_head_len := len(parser.HeadComment) + parser.skipToken() + parser.splitStemComment(prior_head_len) + token = parser.peekToken() + if token == nil { + return false + } + if token.Type != BLOCK_ENTRY_TOKEN && token.Type != BLOCK_END_TOKEN { + parser.states = append(parser.states, PARSE_BLOCK_SEQUENCE_ENTRY_STATE) + return parser.parseNode(event, true, false) + } else { + parser.state = PARSE_BLOCK_SEQUENCE_ENTRY_STATE + return parser.processEmptyScalar(event, mark) + } + } + if token.Type == BLOCK_END_TOKEN { + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = Event{ + Type: SEQUENCE_END_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + } + + parser.skipToken() + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return parser.setParserErrorContext( + "while parsing a block collection", context_mark, + "did not find expected '-' indicator", token.StartMark) +} + +// Parse the productions: +// indentless_sequence ::= (BLOCK-ENTRY block_node?)+ +// +// *********** * +func (parser *Parser) parseIndentlessSequenceEntry(event *Event) bool { + token 
:= parser.peekToken() + if token == nil { + return false + } + + if token.Type == BLOCK_ENTRY_TOKEN { + mark := token.EndMark + prior_head_len := len(parser.HeadComment) + parser.skipToken() + parser.splitStemComment(prior_head_len) + token = parser.peekToken() + if token == nil { + return false + } + if token.Type != BLOCK_ENTRY_TOKEN && + token.Type != KEY_TOKEN && + token.Type != VALUE_TOKEN && + token.Type != BLOCK_END_TOKEN { + parser.states = append(parser.states, PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE) + return parser.parseNode(event, true, false) + } + parser.state = PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE + return parser.processEmptyScalar(event, mark) + } + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + + *event = Event{ + Type: SEQUENCE_END_EVENT, + StartMark: token.StartMark, + EndMark: token.StartMark, // [Go] Shouldn't this be token.end_mark? + } + return true +} + +// Split stem comment from head comment. +// +// When a sequence or map is found under a sequence entry, the former head comment +// is assigned to the underlying sequence or map as a whole, not the individual +// sequence or map entry as would be expected otherwise. To handle this case the +// previous head comment is moved aside as the stem comment. +func (parser *Parser) splitStemComment(stem_len int) { + if stem_len == 0 { + return + } + + token := parser.peekToken() + if token == nil || token.Type != BLOCK_SEQUENCE_START_TOKEN && token.Type != BLOCK_MAPPING_START_TOKEN { + return + } + + parser.stem_comment = parser.HeadComment[:stem_len] + if len(parser.HeadComment) == stem_len { + parser.HeadComment = nil + } else { + // Copy suffix to prevent very strange bugs if someone ever appends + // further bytes to the prefix in the stem_comment slice above. + parser.HeadComment = append([]byte(nil), parser.HeadComment[stem_len+1:]...) 
+ } +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ******************* +// ((KEY block_node_or_indentless_sequence?)? +// *** * +// (VALUE block_node_or_indentless_sequence?)?)* +// +// BLOCK-END +// ********* +func (parser *Parser) parseBlockMappingKey(event *Event, first bool) bool { + if first { + token := parser.peekToken() + if token == nil { + return false + } + parser.marks = append(parser.marks, token.StartMark) + parser.skipToken() + } + + token := parser.peekToken() + if token == nil { + return false + } + + // [Go] A tail comment was left from the prior mapping value processed. Emit an event + // as it needs to be processed with that value and not the following key. + if len(parser.tail_comment) > 0 { + *event = Event{ + Type: TAIL_COMMENT_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + FootComment: parser.tail_comment, + } + parser.tail_comment = nil + return true + } + + switch token.Type { + case KEY_TOKEN: + mark := token.EndMark + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + if token.Type != KEY_TOKEN && + token.Type != VALUE_TOKEN && + token.Type != BLOCK_END_TOKEN { + parser.states = append(parser.states, PARSE_BLOCK_MAPPING_VALUE_STATE) + return parser.parseNode(event, true, true) + } else { + parser.state = PARSE_BLOCK_MAPPING_VALUE_STATE + return parser.processEmptyScalar(event, mark) + } + case BLOCK_END_TOKEN: + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = Event{ + Type: MAPPING_END_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + } + parser.setEventComments(event) + parser.skipToken() + return true + } + + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return parser.setParserErrorContext( + "while parsing a block mapping", context_mark, + "did not find 
expected key", token.StartMark) +} + +// Parse the productions: +// block_mapping ::= BLOCK-MAPPING_START +// +// ((KEY block_node_or_indentless_sequence?)? +// +// (VALUE block_node_or_indentless_sequence?)?)* +// ***** * +// BLOCK-END +func (parser *Parser) parseBlockMappingValue(event *Event) bool { + token := parser.peekToken() + if token == nil { + return false + } + if token.Type == VALUE_TOKEN { + mark := token.EndMark + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + if token.Type != KEY_TOKEN && + token.Type != VALUE_TOKEN && + token.Type != BLOCK_END_TOKEN { + parser.states = append(parser.states, PARSE_BLOCK_MAPPING_KEY_STATE) + return parser.parseNode(event, true, true) + } + parser.state = PARSE_BLOCK_MAPPING_KEY_STATE + return parser.processEmptyScalar(event, mark) + } + parser.state = PARSE_BLOCK_MAPPING_KEY_STATE + return parser.processEmptyScalar(event, token.StartMark) +} + +// Parse the productions: +// flow_sequence ::= FLOW-SEQUENCE-START +// +// ******************* +// (flow_sequence_entry FLOW-ENTRY)* +// * ********** +// flow_sequence_entry? +// * +// FLOW-SEQUENCE-END +// ***************** +// +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// * +func (parser *Parser) parseFlowSequenceEntry(event *Event, first bool) bool { + if first { + token := parser.peekToken() + if token == nil { + return false + } + parser.marks = append(parser.marks, token.StartMark) + parser.skipToken() + } + token := parser.peekToken() + if token == nil { + return false + } + if token.Type != FLOW_SEQUENCE_END_TOKEN { + if !first { + if token.Type == FLOW_ENTRY_TOKEN { + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return parser.setParserErrorContext( + "while parsing a flow sequence", context_mark, + "did not find expected ',' or ']'", token.StartMark) + } + } + + if token.Type == KEY_TOKEN { + parser.state = PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE + *event = Event{ + Type: MAPPING_START_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + Implicit: true, + Style: Style(FLOW_MAPPING_STYLE), + } + parser.skipToken() + return true + } else if token.Type != FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, PARSE_FLOW_SEQUENCE_ENTRY_STATE) + return parser.parseNode(event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + + *event = Event{ + Type: SEQUENCE_END_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + } + parser.setEventComments(event) + + parser.skipToken() + return true +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// +// *** * +func (parser *Parser) parseFlowSequenceEntryMappingKey(event *Event) bool { + token := parser.peekToken() + if token == nil { + return false + } + if token.Type != VALUE_TOKEN && + token.Type != FLOW_ENTRY_TOKEN && + token.Type != FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE) + return parser.parseNode(event, false, false) + } + mark := token.EndMark + parser.skipToken() + parser.state = PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE + return parser.processEmptyScalar(event, mark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// ***** * +func (parser *Parser) parseFlowSequenceEntryMappingValue(event *Event) bool { + token := parser.peekToken() + if token == nil { + return false + } + if token.Type == VALUE_TOKEN { + parser.skipToken() + token := parser.peekToken() + if token == nil { + return false + } + if token.Type != FLOW_ENTRY_TOKEN && token.Type != FLOW_SEQUENCE_END_TOKEN { + parser.states = append(parser.states, PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE) + return parser.parseNode(event, false, false) + } + } + parser.state = PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE + return parser.processEmptyScalar(event, token.StartMark) +} + +// Parse the productions: +// flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// +// * +func (parser *Parser) parseFlowSequenceEntryMappingEnd(event *Event) bool { + token := parser.peekToken() + if token == nil { + return false + } + parser.state = PARSE_FLOW_SEQUENCE_ENTRY_STATE + *event = Event{ + Type: MAPPING_END_EVENT, + StartMark: token.StartMark, + EndMark: token.StartMark, // [Go] Shouldn't this be end_mark? + } + return true +} + +// Parse the productions: +// flow_mapping ::= FLOW-MAPPING-START +// +// ****************** +// (flow_mapping_entry FLOW-ENTRY)* +// * ********** +// flow_mapping_entry? 
+// ****************** +// FLOW-MAPPING-END +// **************** +// +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? +// - *** * +func (parser *Parser) parseFlowMappingKey(event *Event, first bool) bool { + if first { + token := parser.peekToken() + parser.marks = append(parser.marks, token.StartMark) + parser.skipToken() + } + + token := parser.peekToken() + if token == nil { + return false + } + + if token.Type != FLOW_MAPPING_END_TOKEN { + if !first { + if token.Type == FLOW_ENTRY_TOKEN { + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + } else { + context_mark := parser.marks[len(parser.marks)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + return parser.setParserErrorContext( + "while parsing a flow mapping", context_mark, + "did not find expected ',' or '}'", token.StartMark) + } + } + + if token.Type == KEY_TOKEN { + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + if token.Type != VALUE_TOKEN && + token.Type != FLOW_ENTRY_TOKEN && + token.Type != FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, PARSE_FLOW_MAPPING_VALUE_STATE) + return parser.parseNode(event, false, false) + } else { + parser.state = PARSE_FLOW_MAPPING_VALUE_STATE + return parser.processEmptyScalar(event, token.StartMark) + } + } else if token.Type != FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE) + return parser.parseNode(event, false, false) + } + } + + parser.state = parser.states[len(parser.states)-1] + parser.states = parser.states[:len(parser.states)-1] + parser.marks = parser.marks[:len(parser.marks)-1] + *event = Event{ + Type: MAPPING_END_EVENT, + StartMark: token.StartMark, + EndMark: token.EndMark, + } + parser.setEventComments(event) + parser.skipToken() + return true +} + +// Parse the productions: +// flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)? 
+// - ***** * +func (parser *Parser) parseFlowMappingValue(event *Event, empty bool) bool { + token := parser.peekToken() + if token == nil { + return false + } + if empty { + parser.state = PARSE_FLOW_MAPPING_KEY_STATE + return parser.processEmptyScalar(event, token.StartMark) + } + if token.Type == VALUE_TOKEN { + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + if token.Type != FLOW_ENTRY_TOKEN && token.Type != FLOW_MAPPING_END_TOKEN { + parser.states = append(parser.states, PARSE_FLOW_MAPPING_KEY_STATE) + return parser.parseNode(event, false, false) + } + } + parser.state = PARSE_FLOW_MAPPING_KEY_STATE + return parser.processEmptyScalar(event, token.StartMark) +} + +// Generate an empty scalar event. +func (parser *Parser) processEmptyScalar(event *Event, mark Mark) bool { + *event = Event{ + Type: SCALAR_EVENT, + StartMark: mark, + EndMark: mark, + Value: nil, // Empty + Implicit: true, + Style: Style(PLAIN_SCALAR_STYLE), + } + return true +} + +var default_tag_directives = []TagDirective{ + {[]byte("!"), []byte("!")}, + {[]byte("!!"), []byte("tag:yaml.org,2002:")}, +} + +// Parse directives. 
+func (parser *Parser) processDirectives(version_directive_ref **VersionDirective, tag_directives_ref *[]TagDirective) bool { + var version_directive *VersionDirective + var tag_directives []TagDirective + + token := parser.peekToken() + if token == nil { + return false + } + + for token.Type == VERSION_DIRECTIVE_TOKEN || token.Type == TAG_DIRECTIVE_TOKEN { + switch token.Type { + case VERSION_DIRECTIVE_TOKEN: + if version_directive != nil { + parser.setParserError( + "found duplicate %YAML directive", token.StartMark) + return false + } + if token.major != 1 || token.minor != 1 { + parser.setParserError( + "found incompatible YAML document", token.StartMark) + return false + } + version_directive = &VersionDirective{ + major: token.major, + minor: token.minor, + } + case TAG_DIRECTIVE_TOKEN: + value := TagDirective{ + handle: token.Value, + prefix: token.prefix, + } + if !parser.appendTagDirective(value, false, token.StartMark) { + return false + } + tag_directives = append(tag_directives, value) + } + + parser.skipToken() + token = parser.peekToken() + if token == nil { + return false + } + } + + for i := range default_tag_directives { + if !parser.appendTagDirective(default_tag_directives[i], true, token.StartMark) { + return false + } + } + + if version_directive_ref != nil { + *version_directive_ref = version_directive + } + if tag_directives_ref != nil { + *tag_directives_ref = tag_directives + } + return true +} + +// Append a tag directive to the directives stack. +func (parser *Parser) appendTagDirective(value TagDirective, allow_duplicates bool, mark Mark) bool { + for i := range parser.tag_directives { + if bytes.Equal(value.handle, parser.tag_directives[i].handle) { + if allow_duplicates { + return true + } + return parser.setParserError("found duplicate %TAG directive", mark) + } + } + + // [Go] I suspect the copy is unnecessary. This was likely done + // because there was no way to track ownership of the data. 
+ value_copy := TagDirective{ + handle: make([]byte, len(value.handle)), + prefix: make([]byte, len(value.prefix)), + } + copy(value_copy.handle, value.handle) + copy(value_copy.prefix, value.prefix) + parser.tag_directives = append(parser.tag_directives, value_copy) + return true +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/reader.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/reader.go new file mode 100644 index 00000000..7e690378 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/reader.go @@ -0,0 +1,436 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package libyaml + +import ( + "io" +) + +// Set the reader error and return 0. 
+func (parser *Parser) setReaderError(problem string, offset int, value int) bool { + parser.ErrorType = READER_ERROR + parser.Problem = problem + parser.ProblemOffset = offset + parser.ProblemValue = value + return false +} + +// Byte order marks. +const ( + bom_UTF8 = "\xef\xbb\xbf" + bom_UTF16LE = "\xff\xfe" + bom_UTF16BE = "\xfe\xff" +) + +// Determine the input stream encoding by checking the BOM symbol. If no BOM is +// found, the UTF-8 encoding is assumed. Return 1 on success, 0 on failure. +func (parser *Parser) determineEncoding() bool { + // Ensure that we had enough bytes in the raw buffer. + for !parser.eof && len(parser.raw_buffer)-parser.raw_buffer_pos < 3 { + if !parser.updateRawBuffer() { + return false + } + } + + // Determine the encoding. + buf := parser.raw_buffer + pos := parser.raw_buffer_pos + avail := len(buf) - pos + if avail >= 2 && buf[pos] == bom_UTF16LE[0] && buf[pos+1] == bom_UTF16LE[1] { + parser.encoding = UTF16LE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 2 && buf[pos] == bom_UTF16BE[0] && buf[pos+1] == bom_UTF16BE[1] { + parser.encoding = UTF16BE_ENCODING + parser.raw_buffer_pos += 2 + parser.offset += 2 + } else if avail >= 3 && buf[pos] == bom_UTF8[0] && buf[pos+1] == bom_UTF8[1] && buf[pos+2] == bom_UTF8[2] { + parser.encoding = UTF8_ENCODING + parser.raw_buffer_pos += 3 + parser.offset += 3 + } else { + parser.encoding = UTF8_ENCODING + } + return true +} + +// Update the raw buffer. +func (parser *Parser) updateRawBuffer() bool { + size_read := 0 + + // Return if the raw buffer is full. + if parser.raw_buffer_pos == 0 && len(parser.raw_buffer) == cap(parser.raw_buffer) { + return true + } + + // Return on EOF. + if parser.eof { + return true + } + + // Move the remaining bytes in the raw buffer to the beginning. 
+ if parser.raw_buffer_pos > 0 && parser.raw_buffer_pos < len(parser.raw_buffer) { + copy(parser.raw_buffer, parser.raw_buffer[parser.raw_buffer_pos:]) + } + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)-parser.raw_buffer_pos] + parser.raw_buffer_pos = 0 + + // Call the read handler to fill the buffer. + size_read, err := parser.read_handler(parser, parser.raw_buffer[len(parser.raw_buffer):cap(parser.raw_buffer)]) + parser.raw_buffer = parser.raw_buffer[:len(parser.raw_buffer)+size_read] + if err == io.EOF { + parser.eof = true + } else if err != nil { + return parser.setReaderError("input error: "+err.Error(), parser.offset, -1) + } + return true +} + +// Ensure that the buffer contains at least `length` characters. +// Return true on success, false on failure. +// +// The length is supposed to be significantly less that the buffer size. +func (parser *Parser) updateBuffer(length int) bool { + if parser.read_handler == nil { + panic("read handler must be set") + } + + // [Go] This function was changed to guarantee the requested length size at EOF. + // The fact we need to do this is pretty awful, but the description above implies + // for that to be the case, and there are tests + + // If the EOF flag is set and the raw buffer is empty, do nothing. + // + //nolint:staticcheck // there is no problem with this empty branch as it's documentation. + if parser.eof && parser.raw_buffer_pos == len(parser.raw_buffer) { + // [Go] ACTUALLY! Read the documentation of this function above. + // This is just broken. To return true, we need to have the + // given length in the buffer. Not doing that means every single + // check that calls this function to make sure the buffer has a + // given length is Go) panicking; or C) accessing invalid memory. + // return true + } + + // Return if the buffer contains enough characters. + if parser.unread >= length { + return true + } + + // Determine the input encoding if it is not known yet. 
+ if parser.encoding == ANY_ENCODING { + if !parser.determineEncoding() { + return false + } + } + + // Move the unread characters to the beginning of the buffer. + buffer_len := len(parser.buffer) + if parser.buffer_pos > 0 && parser.buffer_pos < buffer_len { + copy(parser.buffer, parser.buffer[parser.buffer_pos:]) + buffer_len -= parser.buffer_pos + parser.buffer_pos = 0 + } else if parser.buffer_pos == buffer_len { + buffer_len = 0 + parser.buffer_pos = 0 + } + + // Open the whole buffer for writing, and cut it before returning. + parser.buffer = parser.buffer[:cap(parser.buffer)] + + // Fill the buffer until it has enough characters. + first := true + for parser.unread < length { + + // Fill the raw buffer if necessary. + if !first || parser.raw_buffer_pos == len(parser.raw_buffer) { + if !parser.updateRawBuffer() { + parser.buffer = parser.buffer[:buffer_len] + return false + } + } + first = false + + // Decode the raw buffer. + inner: + for parser.raw_buffer_pos != len(parser.raw_buffer) { + var value rune + var width int + + raw_unread := len(parser.raw_buffer) - parser.raw_buffer_pos + + // Decode the next character. + switch parser.encoding { + case UTF8_ENCODING: + // Decode a UTF-8 character. Check RFC 3629 + // (http://www.ietf.org/rfc/rfc3629.txt) for more details. + // + // The following table (taken from the RFC) is used for + // decoding. + // + // Char. number range | UTF-8 octet sequence + // (hexadecimal) | (binary) + // --------------------+------------------------------------ + // 0000 0000-0000 007F | 0xxxxxxx + // 0000 0080-0000 07FF | 110xxxxx 10xxxxxx + // 0000 0800-0000 FFFF | 1110xxxx 10xxxxxx 10xxxxxx + // 0001 0000-0010 FFFF | 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + // + // Additionally, the characters in the range 0xD800-0xDFFF + // are prohibited as they are reserved for use with UTF-16 + // surrogate pairs. + + // Determine the length of the UTF-8 sequence. 
+ octet := parser.raw_buffer[parser.raw_buffer_pos] + switch { + case octet&0x80 == 0x00: + width = 1 + case octet&0xE0 == 0xC0: + width = 2 + case octet&0xF0 == 0xE0: + width = 3 + case octet&0xF8 == 0xF0: + width = 4 + default: + // The leading octet is invalid. + return parser.setReaderError( + "invalid leading UTF-8 octet", + parser.offset, int(octet)) + } + + // Check if the raw buffer contains an incomplete character. + if width > raw_unread { + if parser.eof { + return parser.setReaderError( + "incomplete UTF-8 octet sequence", + parser.offset, -1) + } + break inner + } + + // Decode the leading octet. + switch { + case octet&0x80 == 0x00: + value = rune(octet & 0x7F) + case octet&0xE0 == 0xC0: + value = rune(octet & 0x1F) + case octet&0xF0 == 0xE0: + value = rune(octet & 0x0F) + case octet&0xF8 == 0xF0: + value = rune(octet & 0x07) + default: + value = 0 + } + + // Check and decode the trailing octets. + for k := 1; k < width; k++ { + octet = parser.raw_buffer[parser.raw_buffer_pos+k] + + // Check if the octet is valid. + if (octet & 0xC0) != 0x80 { + return parser.setReaderError( + "invalid trailing UTF-8 octet", + parser.offset+k, int(octet)) + } + + // Decode the octet. + value = (value << 6) + rune(octet&0x3F) + } + + // Check the length of the sequence against the value. + switch { + case width == 1: + case width == 2 && value >= 0x80: + case width == 3 && value >= 0x800: + case width == 4 && value >= 0x10000: + default: + return parser.setReaderError( + "invalid length of a UTF-8 sequence", + parser.offset, -1) + } + + // Check the range of the value. + if value >= 0xD800 && value <= 0xDFFF || value > 0x10FFFF { + return parser.setReaderError( + "invalid Unicode character", + parser.offset, int(value)) + } + + case UTF16LE_ENCODING, UTF16BE_ENCODING: + var low, high int + if parser.encoding == UTF16LE_ENCODING { + low, high = 0, 1 + } else { + low, high = 1, 0 + } + + // The UTF-16 encoding is not as simple as one might + // naively think. 
Check RFC 2781 + // (http://www.ietf.org/rfc/rfc2781.txt). + // + // Normally, two subsequent bytes describe a Unicode + // character. However a special technique (called a + // surrogate pair) is used for specifying character + // values larger than 0xFFFF. + // + // A surrogate pair consists of two pseudo-characters: + // high surrogate area (0xD800-0xDBFF) + // low surrogate area (0xDC00-0xDFFF) + // + // The following formulas are used for decoding + // and encoding characters using surrogate pairs: + // + // U = U' + 0x10000 (0x01 00 00 <= U <= 0x10 FF FF) + // U' = yyyyyyyyyyxxxxxxxxxx (0 <= U' <= 0x0F FF FF) + // W1 = 110110yyyyyyyyyy + // W2 = 110111xxxxxxxxxx + // + // where U is the character value, W1 is the high surrogate + // area, W2 is the low surrogate area. + + // Check for incomplete UTF-16 character. + if raw_unread < 2 { + if parser.eof { + return parser.setReaderError( + "incomplete UTF-16 character", + parser.offset, -1) + } + break inner + } + + // Get the character. + value = rune(parser.raw_buffer[parser.raw_buffer_pos+low]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high]) << 8) + + // Check for unexpected low surrogate area. + if value&0xFC00 == 0xDC00 { + return parser.setReaderError( + "unexpected low surrogate area", + parser.offset, int(value)) + } + + // Check for a high surrogate area. + if value&0xFC00 == 0xD800 { + width = 4 + + // Check for incomplete surrogate pair. + if raw_unread < 4 { + if parser.eof { + return parser.setReaderError( + "incomplete UTF-16 surrogate pair", + parser.offset, -1) + } + break inner + } + + // Get the next character. + value2 := rune(parser.raw_buffer[parser.raw_buffer_pos+low+2]) + + (rune(parser.raw_buffer[parser.raw_buffer_pos+high+2]) << 8) + + // Check for a low surrogate area. + if value2&0xFC00 != 0xDC00 { + return parser.setReaderError( + "expected low surrogate area", + parser.offset+2, int(value2)) + } + + // Generate the value of the surrogate pair. 
+ value = 0x10000 + ((value & 0x3FF) << 10) + (value2 & 0x3FF) + } else { + width = 2 + } + + default: + panic("impossible") + } + + // Check if the character is in the allowed range: + // #x9 | #xA | #xD | [#x20-#x7E] (8 bit) + // | #x85 | [#xA0-#xD7FF] | [#xE000-#xFFFD] (16 bit) + // | [#x10000-#x10FFFF] (32 bit) + switch { + case value == 0x09: + case value == 0x0A: + case value == 0x0D: + case value >= 0x20 && value <= 0x7E: + case value == 0x85: + case value >= 0xA0 && value <= 0xD7FF: + case value >= 0xE000 && value <= 0xFFFD: + case value >= 0x10000 && value <= 0x10FFFF: + default: + return parser.setReaderError( + "control characters are not allowed", + parser.offset, int(value)) + } + + // Move the raw pointers. + parser.raw_buffer_pos += width + parser.offset += width + + // Finally put the character into the buffer. + if value <= 0x7F { + // 0000 0000-0000 007F . 0xxxxxxx + parser.buffer[buffer_len+0] = byte(value) + buffer_len += 1 + } else if value <= 0x7FF { + // 0000 0080-0000 07FF . 110xxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xC0 + (value >> 6)) + parser.buffer[buffer_len+1] = byte(0x80 + (value & 0x3F)) + buffer_len += 2 + } else if value <= 0xFFFF { + // 0000 0800-0000 FFFF . 1110xxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xE0 + (value >> 12)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + (value & 0x3F)) + buffer_len += 3 + } else { + // 0001 0000-0010 FFFF . 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx + parser.buffer[buffer_len+0] = byte(0xF0 + (value >> 18)) + parser.buffer[buffer_len+1] = byte(0x80 + ((value >> 12) & 0x3F)) + parser.buffer[buffer_len+2] = byte(0x80 + ((value >> 6) & 0x3F)) + parser.buffer[buffer_len+3] = byte(0x80 + (value & 0x3F)) + buffer_len += 4 + } + + parser.unread++ + } + + // On EOF, put NUL into the buffer and return. 
+ if parser.eof { + parser.buffer[buffer_len] = 0 + buffer_len++ + parser.unread++ + break + } + } + // [Go] Read the documentation of this function above. To return true, + // we need to have the given length in the buffer. Not doing that means + // every single check that calls this function to make sure the buffer + // has a given length is Go) panicking; or C) accessing invalid memory. + // This happens here due to the EOF above breaking early. + for buffer_len < length { + parser.buffer[buffer_len] = 0 + buffer_len++ + } + parser.buffer = parser.buffer[:buffer_len] + return true +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/scanner.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/scanner.go new file mode 100644 index 00000000..fbb3b5c8 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/scanner.go @@ -0,0 +1,3030 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package libyaml + +import ( + "bytes" + "fmt" +) + +// Introduction +// ************ +// +// The following notes assume that you are familiar with the YAML specification +// (http://yaml.org/spec/1.2/spec.html). We mostly follow it, although in +// some cases we are less restrictive that it requires. +// +// The process of transforming a YAML stream into a sequence of events is +// divided on two steps: Scanning and Parsing. +// +// The Scanner transforms the input stream into a sequence of tokens, while the +// parser transform the sequence of tokens produced by the Scanner into a +// sequence of parsing events. +// +// The Scanner is rather clever and complicated. The Parser, on the contrary, +// is a straightforward implementation of a recursive-descendant parser (or, +// LL(1) parser, as it is usually called). +// +// Actually there are two issues of Scanning that might be called "clever", the +// rest is quite straightforward. The issues are "block collection start" and +// "simple keys". Both issues are explained below in details. +// +// Here the Scanning step is explained and implemented. We start with the list +// of all the tokens produced by the Scanner together with short descriptions. +// +// Now, tokens: +// +// STREAM-START(encoding) # The stream start. +// STREAM-END # The stream end. +// VERSION-DIRECTIVE(major,minor) # The '%YAML' directive. +// TAG-DIRECTIVE(handle,prefix) # The '%TAG' directive. +// DOCUMENT-START # '---' +// DOCUMENT-END # '...' +// BLOCK-SEQUENCE-START # Indentation increase denoting a block +// BLOCK-MAPPING-START # sequence or a block mapping. +// BLOCK-END # Indentation decrease. 
+// FLOW-SEQUENCE-START # '[' +// FLOW-SEQUENCE-END # ']' +// BLOCK-SEQUENCE-START # '{' +// BLOCK-SEQUENCE-END # '}' +// BLOCK-ENTRY # '-' +// FLOW-ENTRY # ',' +// KEY # '?' or nothing (simple keys). +// VALUE # ':' +// ALIAS(anchor) # '*anchor' +// ANCHOR(anchor) # '&anchor' +// TAG(handle,suffix) # '!handle!suffix' +// SCALAR(value,style) # A scalar. +// +// The following two tokens are "virtual" tokens denoting the beginning and the +// end of the stream: +// +// STREAM-START(encoding) +// STREAM-END +// +// We pass the information about the input stream encoding with the +// STREAM-START token. +// +// The next two tokens are responsible for tags: +// +// VERSION-DIRECTIVE(major,minor) +// TAG-DIRECTIVE(handle,prefix) +// +// Example: +// +// %YAML 1.1 +// %TAG ! !foo +// %TAG !yaml! tag:yaml.org,2002: +// --- +// +// The corresponding sequence of tokens: +// +// STREAM-START(utf-8) +// VERSION-DIRECTIVE(1,1) +// TAG-DIRECTIVE("!","!foo") +// TAG-DIRECTIVE("!yaml","tag:yaml.org,2002:") +// DOCUMENT-START +// STREAM-END +// +// Note that the VERSION-DIRECTIVE and TAG-DIRECTIVE tokens occupy a whole +// line. +// +// The document start and end indicators are represented by: +// +// DOCUMENT-START +// DOCUMENT-END +// +// Note that if a YAML stream contains an implicit document (without '---' +// and '...' indicators), no DOCUMENT-START and DOCUMENT-END tokens will be +// produced. +// +// In the following examples, we present whole documents together with the +// produced tokens. +// +// 1. An implicit document: +// +// 'a scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// STREAM-END +// +// 2. An explicit document: +// +// --- +// 'a scalar' +// ... +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// SCALAR("a scalar",single-quoted) +// DOCUMENT-END +// STREAM-END +// +// 3. 
Several documents in a stream: +// +// 'a scalar' +// --- +// 'another scalar' +// --- +// 'yet another scalar' +// +// Tokens: +// +// STREAM-START(utf-8) +// SCALAR("a scalar",single-quoted) +// DOCUMENT-START +// SCALAR("another scalar",single-quoted) +// DOCUMENT-START +// SCALAR("yet another scalar",single-quoted) +// STREAM-END +// +// We have already introduced the SCALAR token above. The following tokens are +// used to describe aliases, anchors, tag, and scalars: +// +// ALIAS(anchor) +// ANCHOR(anchor) +// TAG(handle,suffix) +// SCALAR(value,style) +// +// The following series of examples illustrate the usage of these tokens: +// +// 1. A recursive sequence: +// +// &A [ *A ] +// +// Tokens: +// +// STREAM-START(utf-8) +// ANCHOR("A") +// FLOW-SEQUENCE-START +// ALIAS("A") +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A tagged scalar: +// +// !!float "3.14" # A good approximation. +// +// Tokens: +// +// STREAM-START(utf-8) +// TAG("!!","float") +// SCALAR("3.14",double-quoted) +// STREAM-END +// +// 3. Various scalar styles: +// +// --- # Implicit empty plain scalars do not produce tokens. +// --- a plain scalar +// --- 'a single-quoted scalar' +// --- "a double-quoted scalar" +// --- |- +// a literal scalar +// --- >- +// a folded +// scalar +// +// Tokens: +// +// STREAM-START(utf-8) +// DOCUMENT-START +// DOCUMENT-START +// SCALAR("a plain scalar",plain) +// DOCUMENT-START +// SCALAR("a single-quoted scalar",single-quoted) +// DOCUMENT-START +// SCALAR("a double-quoted scalar",double-quoted) +// DOCUMENT-START +// SCALAR("a literal scalar",literal) +// DOCUMENT-START +// SCALAR("a folded scalar",folded) +// STREAM-END +// +// Now it's time to review collection-related tokens. 
We will start with +// flow collections: +// +// FLOW-SEQUENCE-START +// FLOW-SEQUENCE-END +// FLOW-MAPPING-START +// FLOW-MAPPING-END +// FLOW-ENTRY +// KEY +// VALUE +// +// The tokens FLOW-SEQUENCE-START, FLOW-SEQUENCE-END, FLOW-MAPPING-START, and +// FLOW-MAPPING-END represent the indicators '[', ']', '{', and '}' +// correspondingly. FLOW-ENTRY represent the ',' indicator. Finally the +// indicators '?' and ':', which are used for denoting mapping keys and values, +// are represented by the KEY and VALUE tokens. +// +// The following examples show flow collections: +// +// 1. A flow sequence: +// +// [item 1, item 2, item 3] +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-SEQUENCE-START +// SCALAR("item 1",plain) +// FLOW-ENTRY +// SCALAR("item 2",plain) +// FLOW-ENTRY +// SCALAR("item 3",plain) +// FLOW-SEQUENCE-END +// STREAM-END +// +// 2. A flow mapping: +// +// { +// a simple key: a value, # Note that the KEY token is produced. +// ? a complex key: another value, +// } +// +// Tokens: +// +// STREAM-START(utf-8) +// FLOW-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// FLOW-ENTRY +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// FLOW-ENTRY +// FLOW-MAPPING-END +// STREAM-END +// +// A simple key is a key which is not denoted by the '?' indicator. Note that +// the Scanner still produce the KEY token whenever it encounters a simple key. +// +// For scanning block collections, the following tokens are used (note that we +// repeat KEY and VALUE here): +// +// BLOCK-SEQUENCE-START +// BLOCK-MAPPING-START +// BLOCK-END +// BLOCK-ENTRY +// KEY +// VALUE +// +// The tokens BLOCK-SEQUENCE-START and BLOCK-MAPPING-START denote indentation +// increase that precedes a block collection (cf. the INDENT token in Python). +// The token BLOCK-END denote indentation decrease that ends a block collection +// (cf. the DEDENT token in Python). 
However YAML has some syntax peculiarities +// that makes detections of these tokens more complex. +// +// The tokens BLOCK-ENTRY, KEY, and VALUE are used to represent the indicators +// '-', '?', and ':' correspondingly. +// +// The following examples show how the tokens BLOCK-SEQUENCE-START, +// BLOCK-MAPPING-START, and BLOCK-END are emitted by the Scanner: +// +// 1. Block sequences: +// +// - item 1 +// - item 2 +// - +// - item 3.1 +// - item 3.2 +// - +// key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 3.1",plain) +// BLOCK-ENTRY +// SCALAR("item 3.2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Block mappings: +// +// a simple key: a value # The KEY token is produced here. +// ? a complex key +// : another value +// a mapping: +// key 1: value 1 +// key 2: value 2 +// a sequence: +// - item 1 +// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a simple key",plain) +// VALUE +// SCALAR("a value",plain) +// KEY +// SCALAR("a complex key",plain) +// VALUE +// SCALAR("another value",plain) +// KEY +// SCALAR("a mapping",plain) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML does not always require to start a new block collection from a new +// line. 
If the current line contains only '-', '?', and ':' indicators, a new +// block collection may start at the current line. The following examples +// illustrate this case: +// +// 1. Collections in a sequence: +// +// - - item 1 +// - item 2 +// - key 1: value 1 +// key 2: value 2 +// - ? complex key +// : complex value +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-ENTRY +// BLOCK-MAPPING-START +// KEY +// SCALAR("complex key") +// VALUE +// SCALAR("complex value") +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// 2. Collections in a mapping: +// +// ? a sequence +// : - item 1 +// - item 2 +// ? a mapping +// : key 1: value 1 +// key 2: value 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("a sequence",plain) +// VALUE +// BLOCK-SEQUENCE-START +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// KEY +// SCALAR("a mapping",plain) +// VALUE +// BLOCK-MAPPING-START +// KEY +// SCALAR("key 1",plain) +// VALUE +// SCALAR("value 1",plain) +// KEY +// SCALAR("key 2",plain) +// VALUE +// SCALAR("value 2",plain) +// BLOCK-END +// BLOCK-END +// STREAM-END +// +// YAML also permits non-indented sequences if they are included into a block +// mapping. In this case, the token BLOCK-SEQUENCE-START is not produced: +// +// key: +// - item 1 # BLOCK-SEQUENCE-START is NOT produced here. 
+// - item 2 +// +// Tokens: +// +// STREAM-START(utf-8) +// BLOCK-MAPPING-START +// KEY +// SCALAR("key",plain) +// VALUE +// BLOCK-ENTRY +// SCALAR("item 1",plain) +// BLOCK-ENTRY +// SCALAR("item 2",plain) +// BLOCK-END +// + +// Ensure that the buffer contains the required number of characters. +// Return true on success, false on failure (reader error or memory error). +func (parser *Parser) cache(length int) bool { + // [Go] This was inlined: !A.cache(B) -> unread < B && !A.update(B) + return parser.unread >= length || parser.updateBuffer(length) +} + +// Advance the buffer pointer. +func (parser *Parser) skip() { + if !isBlank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + parser.mark.Index++ + parser.mark.Column++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) +} + +func (parser *Parser) skipLine() { + if isCRLF(parser.buffer, parser.buffer_pos) { + parser.mark.Index += 2 + parser.mark.Column = 0 + parser.mark.Line++ + parser.unread -= 2 + parser.buffer_pos += 2 + parser.newlines++ + } else if isLineBreak(parser.buffer, parser.buffer_pos) { + parser.mark.Index++ + parser.mark.Column = 0 + parser.mark.Line++ + parser.unread-- + parser.buffer_pos += width(parser.buffer[parser.buffer_pos]) + parser.newlines++ + } +} + +// Copy a character to a string buffer and advance pointers. +func (parser *Parser) read(s []byte) []byte { + if !isBlank(parser.buffer, parser.buffer_pos) { + parser.newlines = 0 + } + w := width(parser.buffer[parser.buffer_pos]) + if w == 0 { + panic("invalid character sequence") + } + if len(s) == 0 { + s = make([]byte, 0, 32) + } + if w == 1 && len(s)+w <= cap(s) { + s = s[:len(s)+1] + s[len(s)-1] = parser.buffer[parser.buffer_pos] + parser.buffer_pos++ + } else { + s = append(s, parser.buffer[parser.buffer_pos:parser.buffer_pos+w]...) 
+ parser.buffer_pos += w + } + parser.mark.Index++ + parser.mark.Column++ + parser.unread-- + return s +} + +// Copy a line break character to a string buffer and advance pointers. +func (parser *Parser) readLine(s []byte) []byte { + buf := parser.buffer + pos := parser.buffer_pos + switch { + case buf[pos] == '\r' && buf[pos+1] == '\n': + // CR LF . LF + s = append(s, '\n') + parser.buffer_pos += 2 + parser.mark.Index++ + parser.unread-- + case buf[pos] == '\r' || buf[pos] == '\n': + // CR|LF . LF + s = append(s, '\n') + parser.buffer_pos += 1 + case buf[pos] == '\xC2' && buf[pos+1] == '\x85': + // NEL . LF + s = append(s, '\n') + parser.buffer_pos += 2 + case buf[pos] == '\xE2' && buf[pos+1] == '\x80' && (buf[pos+2] == '\xA8' || buf[pos+2] == '\xA9'): + // LS|PS . LS|PS + s = append(s, buf[parser.buffer_pos:pos+3]...) + parser.buffer_pos += 3 + default: + return s + } + parser.mark.Index++ + parser.mark.Column = 0 + parser.mark.Line++ + parser.unread-- + parser.newlines++ + return s +} + +// Scan gets the next token. +func (parser *Parser) Scan(token *Token) bool { + // Erase the token object. + *token = Token{} // [Go] Is this necessary? + + // No tokens after STREAM-END or error. + if parser.stream_end_produced || parser.ErrorType != NO_ERROR { + return true + } + + // Ensure that the tokens queue contains enough tokens. + if !parser.token_available { + if !parser.fetchMoreTokens() { + return false + } + } + + // Fetch the next token from the queue. + *token = parser.tokens[parser.tokens_head] + parser.tokens_head++ + parser.tokens_parsed++ + parser.token_available = false + + if token.Type == STREAM_END_TOKEN { + parser.stream_end_produced = true + } + return true +} + +// Set the scanner error and return false. 
+func (parser *Parser) setScannerError(context string, context_mark Mark, problem string) bool { + parser.ErrorType = SCANNER_ERROR + parser.Context = context + parser.ContextMark = context_mark + parser.Problem = problem + parser.ProblemMark = parser.mark + return false +} + +func (parser *Parser) setScannerTagError(directive bool, context_mark Mark, problem string) bool { + context := "while parsing a tag" + if directive { + context = "while parsing a %TAG directive" + } + return parser.setScannerError(context, context_mark, problem) +} + +func trace(args ...any) func() { + pargs := append([]any{"+++"}, args...) + fmt.Println(pargs...) + pargs = append([]any{"---"}, args...) + return func() { fmt.Println(pargs...) } +} + +// Ensure that the tokens queue contains at least one token which can be +// returned to the Parser. +func (parser *Parser) fetchMoreTokens() bool { + // While we need more tokens to fetch, do it. + for { + // [Go] The comment parsing logic requires a lookahead of two tokens + // so that foot comments may be parsed in time of associating them + // with the tokens that are parsed before them, and also for line + // comments to be transformed into head comments in some edge cases. + if parser.tokens_head < len(parser.tokens)-2 { + // If a potential simple key is at the head position, we need to fetch + // the next token to disambiguate it. + head_tok_idx, ok := parser.simple_keys_by_tok[parser.tokens_parsed] + if !ok { + break + } else if valid, ok := parser.simpleKeyIsValid(&parser.simple_keys[head_tok_idx]); !ok { + return false + } else if !valid { + break + } + } + // Fetch the next token. + if !parser.fetchNextToken() { + return false + } + } + + parser.token_available = true + return true +} + +// The dispatcher for token fetchers. +func (parser *Parser) fetchNextToken() (ok bool) { + // Ensure that the buffer is initialized. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + // Check if we just started scanning. 
Fetch STREAM-START then. + if !parser.stream_start_produced { + return parser.fetchStreamStart() + } + + scan_mark := parser.mark + + // Eat whitespaces and comments until we reach the next token. + if !parser.scanToNextToken() { + return false + } + + // [Go] While unrolling indents, transform the head comments of prior + // indentation levels observed after scan_start into foot comments at + // the respective indexes. + + // Check the indentation level against the current column. + if !parser.unrollIndent(parser.mark.Column, scan_mark) { + return false + } + + // Ensure that the buffer contains at least 4 characters. 4 is the length + // of the longest indicators ('--- ' and '... '). + if parser.unread < 4 && !parser.updateBuffer(4) { + return false + } + + // Is it the end of the stream? + if isZeroChar(parser.buffer, parser.buffer_pos) { + return parser.fetchStreamEnd() + } + + // Is it a directive? + if parser.mark.Column == 0 && parser.buffer[parser.buffer_pos] == '%' { + return parser.fetchDirective() + } + + buf := parser.buffer + pos := parser.buffer_pos + + // Is it the document start indicator? + if parser.mark.Column == 0 && buf[pos] == '-' && buf[pos+1] == '-' && buf[pos+2] == '-' && isBlankOrZero(buf, pos+3) { + return parser.fetchDocumentIndicator(DOCUMENT_START_TOKEN) + } + + // Is it the document end indicator? + if parser.mark.Column == 0 && buf[pos] == '.' && buf[pos+1] == '.' && buf[pos+2] == '.' && isBlankOrZero(buf, pos+3) { + return parser.fetchDocumentIndicator(DOCUMENT_END_TOKEN) + } + + comment_mark := parser.mark + if len(parser.tokens) > 0 && (parser.flow_level == 0 && buf[pos] == ':' || parser.flow_level > 0 && buf[pos] == ',') { + // Associate any following comments with the prior token. 
+ comment_mark = parser.tokens[len(parser.tokens)-1].StartMark + } + defer func() { + if !ok { + return + } + if len(parser.tokens) > 0 && parser.tokens[len(parser.tokens)-1].Type == BLOCK_ENTRY_TOKEN { + // Sequence indicators alone have no line comments. It becomes + // a head comment for whatever follows. + return + } + if !parser.scanLineComment(comment_mark) { + ok = false + return + } + }() + + // Is it the flow sequence start indicator? + if buf[pos] == '[' { + return parser.fetchFlowCollectionStart(FLOW_SEQUENCE_START_TOKEN) + } + + // Is it the flow mapping start indicator? + if parser.buffer[parser.buffer_pos] == '{' { + return parser.fetchFlowCollectionStart(FLOW_MAPPING_START_TOKEN) + } + + // Is it the flow sequence end indicator? + if parser.buffer[parser.buffer_pos] == ']' { + return parser.fetchFlowCollectionEnd( + FLOW_SEQUENCE_END_TOKEN) + } + + // Is it the flow mapping end indicator? + if parser.buffer[parser.buffer_pos] == '}' { + return parser.fetchFlowCollectionEnd( + FLOW_MAPPING_END_TOKEN) + } + + // Is it the flow entry indicator? + if parser.buffer[parser.buffer_pos] == ',' { + return parser.fetchFlowEntry() + } + + // Is it the block entry indicator? + if parser.buffer[parser.buffer_pos] == '-' && isBlankOrZero(parser.buffer, parser.buffer_pos+1) { + return parser.fetchBlockEntry() + } + + // Is it the key indicator? + if parser.buffer[parser.buffer_pos] == '?' && isBlankOrZero(parser.buffer, parser.buffer_pos+1) { + return parser.fetchKey() + } + + // Is it the value indicator? + if parser.buffer[parser.buffer_pos] == ':' && (parser.flow_level > 0 || isBlankOrZero(parser.buffer, parser.buffer_pos+1)) { + return parser.fetchValue() + } + + // Is it an alias? + if parser.buffer[parser.buffer_pos] == '*' { + return parser.fetchAnchor(ALIAS_TOKEN) + } + + // Is it an anchor? + if parser.buffer[parser.buffer_pos] == '&' { + return parser.fetchAnchor(ANCHOR_TOKEN) + } + + // Is it a tag? + if parser.buffer[parser.buffer_pos] == '!' 
{ + return parser.fetchTag() + } + + // Is it a literal scalar? + if parser.buffer[parser.buffer_pos] == '|' && parser.flow_level == 0 { + return parser.fetchBlockScalar(true) + } + + // Is it a folded scalar? + if parser.buffer[parser.buffer_pos] == '>' && parser.flow_level == 0 { + return parser.fetchBlockScalar(false) + } + + // Is it a single-quoted scalar? + if parser.buffer[parser.buffer_pos] == '\'' { + return parser.fetchFlowScalar(true) + } + + // Is it a double-quoted scalar? + if parser.buffer[parser.buffer_pos] == '"' { + return parser.fetchFlowScalar(false) + } + + // Is it a plain scalar? + // + // A plain scalar may start with any non-blank characters except + // + // '-', '?', ':', ',', '[', ']', '{', '}', + // '#', '&', '*', '!', '|', '>', '\'', '\"', + // '%', '@', '`'. + // + // In the block context (and, for the '-' indicator, in the flow context + // too), it may also start with the characters + // + // '-', '?', ':' + // + // if it is followed by a non-space character. + // + // The last rule is more restrictive than the specification requires. + // [Go] TODO Make this logic more reasonable. + //switch parser.buffer[parser.buffer_pos] { + //case '-', '?', ':', ',', '?', '-', ',', ':', ']', '[', '}', '{', '&', '#', '!', '*', '>', '|', '"', '\'', '@', '%', '-', '`': + //} + if !(isBlankOrZero(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '-' || + parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}' || parser.buffer[parser.buffer_pos] == '#' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '*' || + parser.buffer[parser.buffer_pos] == '!' 
|| parser.buffer[parser.buffer_pos] == '|' || + parser.buffer[parser.buffer_pos] == '>' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '"' || parser.buffer[parser.buffer_pos] == '%' || + parser.buffer[parser.buffer_pos] == '@' || parser.buffer[parser.buffer_pos] == '`') || + (parser.buffer[parser.buffer_pos] == '-' && !isBlank(parser.buffer, parser.buffer_pos+1)) || + ((parser.buffer[parser.buffer_pos] == '?' || parser.buffer[parser.buffer_pos] == ':') && + !isBlankOrZero(parser.buffer, parser.buffer_pos+1)) { + return parser.fetchPlainScalar() + } + + // If we don't determine the token type so far, it is an error. + return parser.setScannerError( + "while scanning for the next token", parser.mark, + "found character that cannot start any token") +} + +func (parser *Parser) simpleKeyIsValid(simple_key *SimpleKey) (valid, ok bool) { + if !simple_key.possible { + return false, true + } + + // The 1.2 specification says: + // + // "If the ? indicator is omitted, parsing needs to see past the + // implicit key to recognize it as such. To limit the amount of + // lookahead required, the “:” indicator must appear at most 1024 + // Unicode characters beyond the start of the key. In addition, the key + // is restricted to a single line." + // + if simple_key.mark.Line < parser.mark.Line || simple_key.mark.Index+1024 < parser.mark.Index { + // Check if the potential simple key to be removed is required. + if simple_key.required { + return false, parser.setScannerError( + "while scanning a simple key", simple_key.mark, + "could not find expected ':'") + } + simple_key.possible = false + return false, true + } + return true, true +} + +// Check if a simple key may start at the current position and add it if +// needed. +func (parser *Parser) saveSimpleKey() bool { + // A simple key is required at the current position if the scanner is in + // the block context and the current column coincides with the indentation + // level. 
+ + required := parser.flow_level == 0 && parser.indent == parser.mark.Column + + // + // If the current position may start a simple key, save it. + // + if parser.simple_key_allowed { + simple_key := SimpleKey{ + possible: true, + required: required, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + } + + if !parser.removeSimpleKey() { + return false + } + parser.simple_keys[len(parser.simple_keys)-1] = simple_key + parser.simple_keys_by_tok[simple_key.token_number] = len(parser.simple_keys) - 1 + } + return true +} + +// Remove a potential simple key at the current flow level. +func (parser *Parser) removeSimpleKey() bool { + i := len(parser.simple_keys) - 1 + if parser.simple_keys[i].possible { + // If the key is required, it is an error. + if parser.simple_keys[i].required { + return parser.setScannerError( + "while scanning a simple key", parser.simple_keys[i].mark, + "could not find expected ':'") + } + // Remove the key from the stack. + parser.simple_keys[i].possible = false + delete(parser.simple_keys_by_tok, parser.simple_keys[i].token_number) + } + return true +} + +// max_flow_level limits the flow_level +const max_flow_level = 10000 + +// Increase the flow level and resize the simple key list if needed. +func (parser *Parser) increaseFlowLevel() bool { + // Reset the simple key on the next level. + parser.simple_keys = append(parser.simple_keys, SimpleKey{ + possible: false, + required: false, + token_number: parser.tokens_parsed + (len(parser.tokens) - parser.tokens_head), + mark: parser.mark, + }) + + // Increase the flow level. + parser.flow_level++ + if parser.flow_level > max_flow_level { + return parser.setScannerError( + "while increasing flow level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_flow_level)) + } + return true +} + +// Decrease the flow level. 
+func (parser *Parser) decreaseFlowLevel() bool { + if parser.flow_level > 0 { + parser.flow_level-- + last := len(parser.simple_keys) - 1 + delete(parser.simple_keys_by_tok, parser.simple_keys[last].token_number) + parser.simple_keys = parser.simple_keys[:last] + } + return true +} + +// max_indents limits the indents stack size +const max_indents = 10000 + +// Push the current indentation level to the stack and set the new level +// the current column is greater than the indentation level. In this case, +// append or insert the specified token into the token queue. +func (parser *Parser) rollIndent(column, number int, typ TokenType, mark Mark) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + if parser.indent < column { + // Push the current indentation level to the stack and set the new + // indentation level. + parser.indents = append(parser.indents, parser.indent) + parser.indent = column + if len(parser.indents) > max_indents { + return parser.setScannerError( + "while increasing indent level", parser.simple_keys[len(parser.simple_keys)-1].mark, + fmt.Sprintf("exceeded max depth of %d", max_indents)) + } + + // Create a token and insert it into the queue. + token := Token{ + Type: typ, + StartMark: mark, + EndMark: mark, + } + if number > -1 { + number -= parser.tokens_parsed + } + parser.insertToken(number, &token) + } + return true +} + +// Pop indentation levels from the indents stack until the current level +// becomes less or equal to the column. For each indentation level, append +// the BLOCK-END token. +func (parser *Parser) unrollIndent(column int, scan_mark Mark) bool { + // In the flow context, do nothing. + if parser.flow_level > 0 { + return true + } + + block_mark := scan_mark + block_mark.Index-- + + // Loop through the indentation levels in the stack. + for parser.indent > column { + + // [Go] Reposition the end token before potential following + // foot comments of parent blocks. 
For that, search + // backwards for recent comments that were at the same + // indent as the block that is ending now. + stop_index := block_mark.Index + for i := len(parser.comments) - 1; i >= 0; i-- { + comment := &parser.comments[i] + + if comment.end_mark.Index < stop_index { + // Don't go back beyond the start of the comment/whitespace scan, unless column < 0. + // If requested indent column is < 0, then the document is over and everything else + // is a foot anyway. + break + } + if comment.start_mark.Column == parser.indent+1 { + // This is a good match. But maybe there's a former comment + // at that same indent level, so keep searching. + block_mark = comment.start_mark + } + + // While the end of the former comment matches with + // the start of the following one, we know there's + // nothing in between and scanning is still safe. + stop_index = comment.scan_mark.Index + } + + // Create a token and append it to the queue. + token := Token{ + Type: BLOCK_END_TOKEN, + StartMark: block_mark, + EndMark: block_mark, + } + parser.insertToken(-1, &token) + + // Pop the indentation level. + parser.indent = parser.indents[len(parser.indents)-1] + parser.indents = parser.indents[:len(parser.indents)-1] + } + return true +} + +// Initialize the scanner and produce the STREAM-START token. +func (parser *Parser) fetchStreamStart() bool { + // Set the initial indentation. + parser.indent = -1 + + // Initialize the simple key stack. + parser.simple_keys = append(parser.simple_keys, SimpleKey{}) + + parser.simple_keys_by_tok = make(map[int]int) + + // A simple key is allowed at the beginning of the stream. + parser.simple_key_allowed = true + + // We have started. + parser.stream_start_produced = true + + // Create the STREAM-START token and append it to the queue. 
+ token := Token{ + Type: STREAM_START_TOKEN, + StartMark: parser.mark, + EndMark: parser.mark, + encoding: parser.encoding, + } + parser.insertToken(-1, &token) + return true +} + +// Produce the STREAM-END token and shut down the scanner. +func (parser *Parser) fetchStreamEnd() bool { + // Force new line. + if parser.mark.Column != 0 { + parser.mark.Column = 0 + parser.mark.Line++ + } + + // Reset the indentation level. + if !parser.unrollIndent(-1, parser.mark) { + return false + } + + // Reset simple keys. + if !parser.removeSimpleKey() { + return false + } + + parser.simple_key_allowed = false + + // Create the STREAM-END token and append it to the queue. + token := Token{ + Type: STREAM_END_TOKEN, + StartMark: parser.mark, + EndMark: parser.mark, + } + parser.insertToken(-1, &token) + return true +} + +// Produce a VERSION-DIRECTIVE or TAG-DIRECTIVE token. +func (parser *Parser) fetchDirective() bool { + // Reset the indentation level. + if !parser.unrollIndent(-1, parser.mark) { + return false + } + + // Reset simple keys. + if !parser.removeSimpleKey() { + return false + } + + parser.simple_key_allowed = false + + // Create the YAML-DIRECTIVE or TAG-DIRECTIVE token. + token := Token{} + if !parser.scanDirective(&token) { + return false + } + // Append the token to the queue. + parser.insertToken(-1, &token) + return true +} + +// Produce the DOCUMENT-START or DOCUMENT-END token. +func (parser *Parser) fetchDocumentIndicator(typ TokenType) bool { + // Reset the indentation level. + if !parser.unrollIndent(-1, parser.mark) { + return false + } + + // Reset simple keys. + if !parser.removeSimpleKey() { + return false + } + + parser.simple_key_allowed = false + + // Consume the token. + start_mark := parser.mark + + parser.skip() + parser.skip() + parser.skip() + + end_mark := parser.mark + + // Create the DOCUMENT-START or DOCUMENT-END token. + token := Token{ + Type: typ, + StartMark: start_mark, + EndMark: end_mark, + } + // Append the token to the queue. 
+ parser.insertToken(-1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-START or FLOW-MAPPING-START token. +func (parser *Parser) fetchFlowCollectionStart(typ TokenType) bool { + // The indicators '[' and '{' may start a simple key. + if !parser.saveSimpleKey() { + return false + } + + // Increase the flow level. + if !parser.increaseFlowLevel() { + return false + } + + // A simple key may follow the indicators '[' and '{'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-START of FLOW-MAPPING-START token. + token := Token{ + Type: typ, + StartMark: start_mark, + EndMark: end_mark, + } + // Append the token to the queue. + parser.insertToken(-1, &token) + return true +} + +// Produce the FLOW-SEQUENCE-END or FLOW-MAPPING-END token. +func (parser *Parser) fetchFlowCollectionEnd(typ TokenType) bool { + // Reset any potential simple key on the current flow level. + if !parser.removeSimpleKey() { + return false + } + + // Decrease the flow level. + if !parser.decreaseFlowLevel() { + return false + } + + // No simple keys after the indicators ']' and '}'. + parser.simple_key_allowed = false + + // Consume the token. + + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the FLOW-SEQUENCE-END of FLOW-MAPPING-END token. + token := Token{ + Type: typ, + StartMark: start_mark, + EndMark: end_mark, + } + // Append the token to the queue. + parser.insertToken(-1, &token) + return true +} + +// Produce the FLOW-ENTRY token. +func (parser *Parser) fetchFlowEntry() bool { + // Reset any potential simple keys on the current flow level. + if !parser.removeSimpleKey() { + return false + } + + // Simple keys are allowed after ','. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the FLOW-ENTRY token and append it to the queue. 
+ token := Token{ + Type: FLOW_ENTRY_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + } + parser.insertToken(-1, &token) + return true +} + +// Produce the BLOCK-ENTRY token. +func (parser *Parser) fetchBlockEntry() bool { + // Check if the scanner is in the block context. + if parser.flow_level == 0 { + // Check if we are allowed to start a new entry. + if !parser.simple_key_allowed { + return parser.setScannerError("", parser.mark, + "block sequence entries are not allowed in this context") + } + // Add the BLOCK-SEQUENCE-START token if needed. + if !parser.rollIndent(parser.mark.Column, -1, BLOCK_SEQUENCE_START_TOKEN, parser.mark) { + return false + } + } else { //nolint:staticcheck // there is no problem with this empty branch as it's documentation. + + // It is an error for the '-' indicator to occur in the flow context, + // but we let the Parser detect and report about it because the Parser + // is able to point to the context. + } + + // Reset any potential simple keys on the current flow level. + if !parser.removeSimpleKey() { + return false + } + + // Simple keys are allowed after '-'. + parser.simple_key_allowed = true + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the BLOCK-ENTRY token and append it to the queue. + token := Token{ + Type: BLOCK_ENTRY_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + } + parser.insertToken(-1, &token) + return true +} + +// Produce the KEY token. +func (parser *Parser) fetchKey() bool { + // In the block context, additional checks are required. + if parser.flow_level == 0 { + // Check if we are allowed to start a new key (not necessary simple). + if !parser.simple_key_allowed { + return parser.setScannerError("", parser.mark, + "mapping keys are not allowed in this context") + } + // Add the BLOCK-MAPPING-START token if needed. 
+ if !parser.rollIndent(parser.mark.Column, -1, BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Reset any potential simple keys on the current flow level. + if !parser.removeSimpleKey() { + return false + } + + // Simple keys are allowed after '?' in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the KEY token and append it to the queue. + token := Token{ + Type: KEY_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + } + parser.insertToken(-1, &token) + return true +} + +// Produce the VALUE token. +func (parser *Parser) fetchValue() bool { + simple_key := &parser.simple_keys[len(parser.simple_keys)-1] + + // Have we found a simple key? + if valid, ok := parser.simpleKeyIsValid(simple_key); !ok { + return false + } else if valid { + + // Create the KEY token and insert it into the queue. + token := Token{ + Type: KEY_TOKEN, + StartMark: simple_key.mark, + EndMark: simple_key.mark, + } + parser.insertToken(simple_key.token_number-parser.tokens_parsed, &token) + + // In the block context, we may need to add the BLOCK-MAPPING-START token. + if !parser.rollIndent(simple_key.mark.Column, + simple_key.token_number, + BLOCK_MAPPING_START_TOKEN, simple_key.mark) { + return false + } + + // Remove the simple key. + simple_key.possible = false + delete(parser.simple_keys_by_tok, simple_key.token_number) + + // A simple key cannot follow another simple key. + parser.simple_key_allowed = false + + } else { + // The ':' indicator follows a complex key. + + // In the block context, extra checks are required. + if parser.flow_level == 0 { + + // Check if we are allowed to start a complex value. + if !parser.simple_key_allowed { + return parser.setScannerError("", parser.mark, + "mapping values are not allowed in this context") + } + + // Add the BLOCK-MAPPING-START token if needed. 
+ if !parser.rollIndent(parser.mark.Column, -1, BLOCK_MAPPING_START_TOKEN, parser.mark) { + return false + } + } + + // Simple keys after ':' are allowed in the block context. + parser.simple_key_allowed = parser.flow_level == 0 + } + + // Consume the token. + start_mark := parser.mark + parser.skip() + end_mark := parser.mark + + // Create the VALUE token and append it to the queue. + token := Token{ + Type: VALUE_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + } + parser.insertToken(-1, &token) + return true +} + +// Produce the ALIAS or ANCHOR token. +func (parser *Parser) fetchAnchor(typ TokenType) bool { + // An anchor or an alias could be a simple key. + if !parser.saveSimpleKey() { + return false + } + + // A simple key cannot follow an anchor or an alias. + parser.simple_key_allowed = false + + // Create the ALIAS or ANCHOR token and append it to the queue. + var token Token + if !parser.scanAnchor(&token, typ) { + return false + } + parser.insertToken(-1, &token) + return true +} + +// Produce the TAG token. +func (parser *Parser) fetchTag() bool { + // A tag could be a simple key. + if !parser.saveSimpleKey() { + return false + } + + // A simple key cannot follow a tag. + parser.simple_key_allowed = false + + // Create the TAG token and append it to the queue. + var token Token + if !parser.scanTag(&token) { + return false + } + parser.insertToken(-1, &token) + return true +} + +// Produce the SCALAR(...,literal) or SCALAR(...,folded) tokens. +func (parser *Parser) fetchBlockScalar(literal bool) bool { + // Remove any potential simple keys. + if !parser.removeSimpleKey() { + return false + } + + // A simple key may follow a block scalar. + parser.simple_key_allowed = true + + // Create the SCALAR token and append it to the queue. + var token Token + if !parser.scanBlockScalar(&token, literal) { + return false + } + parser.insertToken(-1, &token) + return true +} + +// Produce the SCALAR(...,single-quoted) or SCALAR(...,double-quoted) tokens. 
+func (parser *Parser) fetchFlowScalar(single bool) bool { + // A plain scalar could be a simple key. + if !parser.saveSimpleKey() { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token Token + if !parser.scanFlowScalar(&token, single) { + return false + } + parser.insertToken(-1, &token) + return true +} + +// Produce the SCALAR(...,plain) token. +func (parser *Parser) fetchPlainScalar() bool { + // A plain scalar could be a simple key. + if !parser.saveSimpleKey() { + return false + } + + // A simple key cannot follow a flow scalar. + parser.simple_key_allowed = false + + // Create the SCALAR token and append it to the queue. + var token Token + if !parser.scanPlainScalar(&token) { + return false + } + parser.insertToken(-1, &token) + return true +} + +// Eat whitespaces and comments until the next token is found. +func (parser *Parser) scanToNextToken() bool { + scan_mark := parser.mark + + // Until the next token is not found. + for { + // Allow the BOM mark to start a line. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + if parser.mark.Column == 0 && isBOM(parser.buffer, parser.buffer_pos) { + parser.skip() + } + + // Eat whitespaces. + // Tabs are allowed: + // - in the flow context + // - in the block context, but not at the beginning of the line or + // after '-', '?', or ':' (complex value). + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + for parser.buffer[parser.buffer_pos] == ' ' || ((parser.flow_level > 0 || !parser.simple_key_allowed) && parser.buffer[parser.buffer_pos] == '\t') { + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Check if we just had a line comment under a sequence entry that + // looks more like a header to the following content. 
Similar to this: + // + // - # The comment + // - Some data + // + // If so, transform the line comment to a head comment and reposition. + if len(parser.comments) > 0 && len(parser.tokens) > 1 { + tokenA := parser.tokens[len(parser.tokens)-2] + tokenB := parser.tokens[len(parser.tokens)-1] + comment := &parser.comments[len(parser.comments)-1] + if tokenA.Type == BLOCK_SEQUENCE_START_TOKEN && tokenB.Type == BLOCK_ENTRY_TOKEN && len(comment.line) > 0 && !isLineBreak(parser.buffer, parser.buffer_pos) { + // If it was in the prior line, reposition so it becomes a + // header of the follow up token. Otherwise, keep it in place + // so it becomes a header of the former. + comment.head = comment.line + comment.line = nil + if comment.start_mark.Line == parser.mark.Line-1 { + comment.token_mark = parser.mark + } + } + } + + // Eat a comment until a line break. + if parser.buffer[parser.buffer_pos] == '#' { + if !parser.scanComments(scan_mark) { + return false + } + } + + // If it is a line break, eat it. + if isLineBreak(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + parser.skipLine() + + // In the block context, a new line may start a simple key. + if parser.flow_level == 0 { + parser.simple_key_allowed = true + } + } else { + break // We have found a token. + } + } + + return true +} + +// Scan a YAML-DIRECTIVE or TAG-DIRECTIVE token. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func (parser *Parser) scanDirective(token *Token) bool { + // Eat '%'. + start_mark := parser.mark + parser.skip() + + // Scan the directive name. + var name []byte + if !parser.scanDirectiveName(start_mark, &name) { + return false + } + + // Is it a YAML directive? + if bytes.Equal(name, []byte("YAML")) { + // Scan the VERSION directive value. 
+ var major, minor int8 + if !parser.scanVersionDirectiveValue(start_mark, &major, &minor) { + return false + } + end_mark := parser.mark + + // Create a VERSION-DIRECTIVE token. + *token = Token{ + Type: VERSION_DIRECTIVE_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + major: major, + minor: minor, + } + + // Is it a TAG directive? + } else if bytes.Equal(name, []byte("TAG")) { + // Scan the TAG directive value. + var handle, prefix []byte + if !parser.scanTagDirectiveValue(start_mark, &handle, &prefix) { + return false + } + end_mark := parser.mark + + // Create a TAG-DIRECTIVE token. + *token = Token{ + Type: TAG_DIRECTIVE_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: handle, + prefix: prefix, + } + + // Unknown directive. + } else { + parser.setScannerError("while scanning a directive", + start_mark, "found unknown directive name") + return false + } + + // Eat the rest of the line including any comments. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + if parser.buffer[parser.buffer_pos] == '#' { + // [Go] Discard this inline comment for the time being. + //if !parser.ScanLineComment(start_mark) { + // return false + //} + for !isBreakOrZero(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !isBreakOrZero(parser.buffer, parser.buffer_pos) { + parser.setScannerError("while scanning a directive", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if isLineBreak(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + parser.skipLine() + } + + return true +} + +// Scan the directive name. 
+// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^ +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^ +func (parser *Parser) scanDirectiveName(start_mark Mark, name *[]byte) bool { + // Consume the directive name. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + var s []byte + for isAlpha(parser.buffer, parser.buffer_pos) { + s = parser.read(s) + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Check if the name is empty. + if len(s) == 0 { + parser.setScannerError("while scanning a directive", + start_mark, "could not find expected directive name") + return false + } + + // Check for an blank character after the name. + if !isBlankOrZero(parser.buffer, parser.buffer_pos) { + parser.setScannerError("while scanning a directive", + start_mark, "found unexpected non-alphabetical character") + return false + } + *name = s + return true +} + +// Scan the value of VERSION-DIRECTIVE. +// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^^^^^^ +func (parser *Parser) scanVersionDirectiveValue(start_mark Mark, major, minor *int8) bool { + // Eat whitespaces. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Consume the major version number. + if !parser.scanVersionDirectiveNumber(start_mark, major) { + return false + } + + // Eat '.'. + if parser.buffer[parser.buffer_pos] != '.' { + return parser.setScannerError("while scanning a %YAML directive", + start_mark, "did not find expected digit or '.' character") + } + + parser.skip() + + // Consume the minor version number. + if !parser.scanVersionDirectiveNumber(start_mark, minor) { + return false + } + return true +} + +const max_number_length = 2 + +// Scan the version number of VERSION-DIRECTIVE. 
+// +// Scope: +// +// %YAML 1.1 # a comment \n +// ^ +// %YAML 1.1 # a comment \n +// ^ +func (parser *Parser) scanVersionDirectiveNumber(start_mark Mark, number *int8) bool { + // Repeat while the next character is digit. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + var value, length int8 + for isDigit(parser.buffer, parser.buffer_pos) { + // Check if the number is too long. + length++ + if length > max_number_length { + return parser.setScannerError("while scanning a %YAML directive", + start_mark, "found extremely long version number") + } + value = value*10 + int8(asDigit(parser.buffer, parser.buffer_pos)) + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Check if the number was present. + if length == 0 { + return parser.setScannerError("while scanning a %YAML directive", + start_mark, "did not find expected version number") + } + *number = value + return true +} + +// Scan the value of a TAG-DIRECTIVE token. +// +// Scope: +// +// %TAG !yaml! tag:yaml.org,2002: \n +// ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +func (parser *Parser) scanTagDirectiveValue(start_mark Mark, handle, prefix *[]byte) bool { + var handle_value, prefix_value []byte + + // Eat whitespaces. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Scan a handle. + if !parser.scanTagHandle(true, start_mark, &handle_value) { + return false + } + + // Expect a whitespace. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + if !isBlank(parser.buffer, parser.buffer_pos) { + parser.setScannerError("while scanning a %TAG directive", + start_mark, "did not find expected whitespace") + return false + } + + // Eat whitespaces. 
+ for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Scan a prefix. + if !parser.scanTagURI(true, nil, start_mark, &prefix_value) { + return false + } + + // Expect a whitespace or line break. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + if !isBlankOrZero(parser.buffer, parser.buffer_pos) { + parser.setScannerError("while scanning a %TAG directive", + start_mark, "did not find expected whitespace or line break") + return false + } + + *handle = handle_value + *prefix = prefix_value + return true +} + +func (parser *Parser) scanAnchor(token *Token, typ TokenType) bool { + var s []byte + + // Eat the indicator character. + start_mark := parser.mark + parser.skip() + + // Consume the value. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + for isAnchorChar(parser.buffer, parser.buffer_pos) { + s = parser.read(s) + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + end_mark := parser.mark + + /* + * Check if length of the anchor is greater than 0 and it is followed by + * a whitespace character or one of the indicators: + * + * '?', ':', ',', ']', '}', '%', '@', '`'. + */ + + if len(s) == 0 || + !(isBlankOrZero(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == ',' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '}' || + parser.buffer[parser.buffer_pos] == '%' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '`') { + context := "while scanning an alias" + if typ == ANCHOR_TOKEN { + context = "while scanning an anchor" + } + parser.setScannerError(context, start_mark, + "did not find expected alphabetic or numeric character") + return false + } + + // Create a token. 
+ *token = Token{ + Type: typ, + StartMark: start_mark, + EndMark: end_mark, + Value: s, + } + + return true +} + +/* + * Scan a TAG token. + */ + +func (parser *Parser) scanTag(token *Token) bool { + var handle, suffix []byte + + start_mark := parser.mark + + // Check if the tag is in the canonical form. + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + + if parser.buffer[parser.buffer_pos+1] == '<' { + // Keep the handle as '' + + // Eat '!<' + parser.skip() + parser.skip() + + // Consume the tag value. + if !parser.scanTagURI(false, nil, start_mark, &suffix) { + return false + } + + // Check for '>' and eat it. + if parser.buffer[parser.buffer_pos] != '>' { + parser.setScannerError("while scanning a tag", + start_mark, "did not find the expected '>'") + return false + } + + parser.skip() + } else { + // The tag has either the '!suffix' or the '!handle!suffix' form. + + // First, try to scan a handle. + if !parser.scanTagHandle(false, start_mark, &handle) { + return false + } + + // Check if it is, indeed, handle. + if handle[0] == '!' && len(handle) > 1 && handle[len(handle)-1] == '!' { + // Scan the suffix now. + if !parser.scanTagURI(false, nil, start_mark, &suffix) { + return false + } + } else { + // It wasn't a handle after all. Scan the rest of the tag. + if !parser.scanTagURI(false, handle, start_mark, &suffix) { + return false + } + + // Set the handle to '!'. + handle = []byte{'!'} + + // A special case: the '!' tag. Set the handle to '' and the + // suffix to '!'. + if len(suffix) == 0 { + handle, suffix = suffix, handle + } + } + } + + // Check the character which ends the tag. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + if !isBlankOrZero(parser.buffer, parser.buffer_pos) { + parser.setScannerError("while scanning a tag", + start_mark, "did not find expected whitespace or line break") + return false + } + + end_mark := parser.mark + + // Create a token. 
+ *token = Token{ + Type: TAG_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: handle, + suffix: suffix, + } + return true +} + +// Scan a tag handle. +func (parser *Parser) scanTagHandle(directive bool, start_mark Mark, handle *[]byte) bool { + // Check the initial '!' character. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + if parser.buffer[parser.buffer_pos] != '!' { + parser.setScannerTagError(directive, + start_mark, "did not find expected '!'") + return false + } + + var s []byte + + // Copy the '!' character. + s = parser.read(s) + + // Copy all subsequent alphabetical and numerical characters. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + for isAlpha(parser.buffer, parser.buffer_pos) { + s = parser.read(s) + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Check if the trailing character is '!' and copy it. + if parser.buffer[parser.buffer_pos] == '!' { + s = parser.read(s) + } else { + // It's either the '!' tag or not really a tag handle. If it's a %TAG + // directive, it's an error. If it's a tag token, it must be a part of URI. + if directive && string(s) != "!" { + parser.setScannerTagError(directive, + start_mark, "did not find expected '!'") + return false + } + } + + *handle = s + return true +} + +// Scan a tag. +func (parser *Parser) scanTagURI(directive bool, head []byte, start_mark Mark, uri *[]byte) bool { + // size_t length = head ? strlen((char *)head) : 0 + var s []byte + hasTag := len(head) > 0 + + // Copy the head if needed. + // + // Note that we don't copy the leading '!' character. + if len(head) > 1 { + s = append(s, head[1:]...) + } + + // Scan the tag. 
+ if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + // The set of characters that may appear in URI is as follows: + // + // '0'-'9', 'A'-'Z', 'a'-'z', '_', '-', ';', '/', '?', ':', '@', '&', + // '=', '+', '$', ',', '.', '!', '~', '*', '\'', '(', ')', '[', ']', + // '%'. + // [Go] TODO Convert this into more reasonable logic. + for isAlpha(parser.buffer, parser.buffer_pos) || parser.buffer[parser.buffer_pos] == ';' || + parser.buffer[parser.buffer_pos] == '/' || parser.buffer[parser.buffer_pos] == '?' || + parser.buffer[parser.buffer_pos] == ':' || parser.buffer[parser.buffer_pos] == '@' || + parser.buffer[parser.buffer_pos] == '&' || parser.buffer[parser.buffer_pos] == '=' || + parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '$' || + parser.buffer[parser.buffer_pos] == ',' || parser.buffer[parser.buffer_pos] == '.' || + parser.buffer[parser.buffer_pos] == '!' || parser.buffer[parser.buffer_pos] == '~' || + parser.buffer[parser.buffer_pos] == '*' || parser.buffer[parser.buffer_pos] == '\'' || + parser.buffer[parser.buffer_pos] == '(' || parser.buffer[parser.buffer_pos] == ')' || + parser.buffer[parser.buffer_pos] == '[' || parser.buffer[parser.buffer_pos] == ']' || + parser.buffer[parser.buffer_pos] == '%' { + // Check if it is a URI-escape sequence. + if parser.buffer[parser.buffer_pos] == '%' { + if !parser.scanURIEscapes(directive, start_mark, &s) { + return false + } + } else { + s = parser.read(s) + } + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + hasTag = true + } + + if !hasTag { + parser.setScannerTagError(directive, + start_mark, "did not find expected tag URI") + return false + } + *uri = s + return true +} + +// Decode an URI-escape sequence corresponding to a single UTF-8 character. +func (parser *Parser) scanURIEscapes(directive bool, start_mark Mark, s *[]byte) bool { + // Decode the required number of characters. 
+ w := 1024 + for w > 0 { + // Check for a URI-escaped octet. + if parser.unread < 3 && !parser.updateBuffer(3) { + return false + } + + if !(parser.buffer[parser.buffer_pos] == '%' && + isHex(parser.buffer, parser.buffer_pos+1) && + isHex(parser.buffer, parser.buffer_pos+2)) { + return parser.setScannerTagError(directive, + start_mark, "did not find URI escaped octet") + } + + // Get the octet. + octet := byte((asHex(parser.buffer, parser.buffer_pos+1) << 4) + asHex(parser.buffer, parser.buffer_pos+2)) + + // If it is the leading octet, determine the length of the UTF-8 sequence. + if w == 1024 { + w = width(octet) + if w == 0 { + return parser.setScannerTagError(directive, + start_mark, "found an incorrect leading UTF-8 octet") + } + } else { + // Check if the trailing octet is correct. + if octet&0xC0 != 0x80 { + return parser.setScannerTagError(directive, + start_mark, "found an incorrect trailing UTF-8 octet") + } + } + + // Copy the octet and move the pointers. + *s = append(*s, octet) + parser.skip() + parser.skip() + parser.skip() + w-- + } + return true +} + +// Scan a block scalar. +func (parser *Parser) scanBlockScalar(token *Token, literal bool) bool { + // Eat the indicator '|' or '>'. + start_mark := parser.mark + parser.skip() + + // Scan the additional block scalar indicators. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + // Check for a chomping indicator. + var chomping, increment int + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + // Set the chomping method and eat the indicator. + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + parser.skip() + + // Check for an indentation indicator. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + if isDigit(parser.buffer, parser.buffer_pos) { + // Check that the indentation is greater than 0. 
+ if parser.buffer[parser.buffer_pos] == '0' { + parser.setScannerError("while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + + // Get the indentation level and eat the indicator. + increment = asDigit(parser.buffer, parser.buffer_pos) + parser.skip() + } + + } else if isDigit(parser.buffer, parser.buffer_pos) { + // Do the same as above, but in the opposite order. + + if parser.buffer[parser.buffer_pos] == '0' { + parser.setScannerError("while scanning a block scalar", + start_mark, "found an indentation indicator equal to 0") + return false + } + increment = asDigit(parser.buffer, parser.buffer_pos) + parser.skip() + + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + if parser.buffer[parser.buffer_pos] == '+' || parser.buffer[parser.buffer_pos] == '-' { + if parser.buffer[parser.buffer_pos] == '+' { + chomping = +1 + } else { + chomping = -1 + } + parser.skip() + } + } + + // Eat whitespaces and comments to the end of the line. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + for isBlank(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + if parser.buffer[parser.buffer_pos] == '#' { + if !parser.scanLineComment(start_mark) { + return false + } + for !isBreakOrZero(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + } + + // Check if we are at the end of the line. + if !isBreakOrZero(parser.buffer, parser.buffer_pos) { + parser.setScannerError("while scanning a block scalar", + start_mark, "did not find expected comment or line break") + return false + } + + // Eat a line break. + if isLineBreak(parser.buffer, parser.buffer_pos) { + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + parser.skipLine() + } + + end_mark := parser.mark + + // Set the indentation level if it was specified. 
+ var indent int + if increment > 0 { + if parser.indent >= 0 { + indent = parser.indent + increment + } else { + indent = increment + } + } + + // Scan the leading line breaks and determine the indentation level if needed. + var s, leading_break, trailing_breaks []byte + if !parser.scanBlockScalarBreaks(&indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + + // Scan the block scalar content. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + var leading_blank, trailing_blank bool + for parser.mark.Column == indent && !isZeroChar(parser.buffer, parser.buffer_pos) { + // We are at the beginning of a non-empty line. + + // Is it a trailing whitespace? + trailing_blank = isBlank(parser.buffer, parser.buffer_pos) + + // Check if we need to fold the leading line break. + if !literal && !leading_blank && !trailing_blank && len(leading_break) > 0 && leading_break[0] == '\n' { + // Do we need to join the lines by space? + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } + } else { + s = append(s, leading_break...) + } + leading_break = leading_break[:0] + + // Append the remaining line breaks. + s = append(s, trailing_breaks...) + trailing_breaks = trailing_breaks[:0] + + // Is it a leading whitespace? + leading_blank = isBlank(parser.buffer, parser.buffer_pos) + + // Consume the current line. + for !isBreakOrZero(parser.buffer, parser.buffer_pos) { + s = parser.read(s) + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Consume the line break. + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + + leading_break = parser.readLine(leading_break) + + // Eat the following indentation spaces and line breaks. + if !parser.scanBlockScalarBreaks(&indent, &trailing_breaks, start_mark, &end_mark) { + return false + } + } + + // Chomp the tail. + if chomping != -1 { + s = append(s, leading_break...) + } + if chomping == 1 { + s = append(s, trailing_breaks...) + } + + // Create a token. 
+ *token = Token{ + Type: SCALAR_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: s, + Style: LITERAL_SCALAR_STYLE, + } + if !literal { + token.Style = FOLDED_SCALAR_STYLE + } + return true +} + +// Scan indentation spaces and line breaks for a block scalar. Determine the +// indentation level if needed. +func (parser *Parser) scanBlockScalarBreaks(indent *int, breaks *[]byte, start_mark Mark, end_mark *Mark) bool { + *end_mark = parser.mark + + // Eat the indentation spaces and line breaks. + max_indent := 0 + for { + // Eat the indentation spaces. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + for (*indent == 0 || parser.mark.Column < *indent) && isSpace(parser.buffer, parser.buffer_pos) { + parser.skip() + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + if parser.mark.Column > max_indent { + max_indent = parser.mark.Column + } + + // Check for a tab character messing the indentation. + if (*indent == 0 || parser.mark.Column < *indent) && isTab(parser.buffer, parser.buffer_pos) { + return parser.setScannerError("while scanning a block scalar", + start_mark, "found a tab character where an indentation space is expected") + } + + // Have we found a non-empty line? + if !isLineBreak(parser.buffer, parser.buffer_pos) { + break + } + + // Consume the line break. + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + // [Go] Should really be returning breaks instead. + *breaks = parser.readLine(*breaks) + *end_mark = parser.mark + } + + // Determine the indentation level if needed. + if *indent == 0 { + *indent = max_indent + if *indent < parser.indent+1 { + *indent = parser.indent + 1 + } + if *indent < 1 { + *indent = 1 + } + } + return true +} + +// Scan a quoted scalar. +func (parser *Parser) scanFlowScalar(token *Token, single bool) bool { + // Eat the left quote. + start_mark := parser.mark + parser.skip() + + // Consume the content of the quoted scalar. 
+ var s, leading_break, trailing_breaks, whitespaces []byte + for { + // Check that there are no document indicators at the beginning of the line. + if parser.unread < 4 && !parser.updateBuffer(4) { + return false + } + + if parser.mark.Column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + isBlankOrZero(parser.buffer, parser.buffer_pos+3) { + parser.setScannerError("while scanning a quoted scalar", + start_mark, "found unexpected document indicator") + return false + } + + // Check for EOF. + if isZeroChar(parser.buffer, parser.buffer_pos) { + parser.setScannerError("while scanning a quoted scalar", + start_mark, "found unexpected end of stream") + return false + } + + // Consume non-blank characters. + leading_blanks := false + for !isBlankOrZero(parser.buffer, parser.buffer_pos) { + if single && parser.buffer[parser.buffer_pos] == '\'' && parser.buffer[parser.buffer_pos+1] == '\'' { + // Is is an escaped single quote. + s = append(s, '\'') + parser.skip() + parser.skip() + + } else if single && parser.buffer[parser.buffer_pos] == '\'' { + // It is a right single quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '"' { + // It is a right double quote. + break + } else if !single && parser.buffer[parser.buffer_pos] == '\\' && isLineBreak(parser.buffer, parser.buffer_pos+1) { + // It is an escaped line break. + if parser.unread < 3 && !parser.updateBuffer(3) { + return false + } + parser.skip() + parser.skipLine() + leading_blanks = true + break + + } else if !single && parser.buffer[parser.buffer_pos] == '\\' { + // It is an escape sequence. + code_length := 0 + + // Check the escape character. 
+ switch parser.buffer[parser.buffer_pos+1] { + case '0': + s = append(s, 0) + case 'a': + s = append(s, '\x07') + case 'b': + s = append(s, '\x08') + case 't', '\t': + s = append(s, '\x09') + case 'n': + s = append(s, '\x0A') + case 'v': + s = append(s, '\x0B') + case 'f': + s = append(s, '\x0C') + case 'r': + s = append(s, '\x0D') + case 'e': + s = append(s, '\x1B') + case ' ': + s = append(s, '\x20') + case '"': + s = append(s, '"') + case '\'': + s = append(s, '\'') + case '\\': + s = append(s, '\\') + case 'N': // NEL (#x85) + s = append(s, '\xC2') + s = append(s, '\x85') + case '_': // #xA0 + s = append(s, '\xC2') + s = append(s, '\xA0') + case 'L': // LS (#x2028) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA8') + case 'P': // PS (#x2029) + s = append(s, '\xE2') + s = append(s, '\x80') + s = append(s, '\xA9') + case 'x': + code_length = 2 + case 'u': + code_length = 4 + case 'U': + code_length = 8 + default: + parser.setScannerError("while parsing a quoted scalar", + start_mark, "found unknown escape character") + return false + } + + parser.skip() + parser.skip() + + // Consume an arbitrary escape code. + if code_length > 0 { + var value int + + // Scan the character value. + if parser.unread < code_length && !parser.updateBuffer(code_length) { + return false + } + for k := 0; k < code_length; k++ { + if !isHex(parser.buffer, parser.buffer_pos+k) { + parser.setScannerError("while parsing a quoted scalar", + start_mark, "did not find expected hexadecimal number") + return false + } + value = (value << 4) + asHex(parser.buffer, parser.buffer_pos+k) + } + + // Check the value and write the character. 
+ if (value >= 0xD800 && value <= 0xDFFF) || value > 0x10FFFF { + parser.setScannerError("while parsing a quoted scalar", + start_mark, "found invalid Unicode character escape code") + return false + } + if value <= 0x7F { + s = append(s, byte(value)) + } else if value <= 0x7FF { + s = append(s, byte(0xC0+(value>>6))) + s = append(s, byte(0x80+(value&0x3F))) + } else if value <= 0xFFFF { + s = append(s, byte(0xE0+(value>>12))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } else { + s = append(s, byte(0xF0+(value>>18))) + s = append(s, byte(0x80+((value>>12)&0x3F))) + s = append(s, byte(0x80+((value>>6)&0x3F))) + s = append(s, byte(0x80+(value&0x3F))) + } + + // Advance the pointer. + for k := 0; k < code_length; k++ { + parser.skip() + } + } + } else { + // It is a non-escaped non-blank character. + s = parser.read(s) + } + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + } + + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + // Check if we are at the end of the scalar. + if single { + if parser.buffer[parser.buffer_pos] == '\'' { + break + } + } else { + if parser.buffer[parser.buffer_pos] == '"' { + break + } + } + + // Consume blank characters. + for isBlank(parser.buffer, parser.buffer_pos) || isLineBreak(parser.buffer, parser.buffer_pos) { + if isBlank(parser.buffer, parser.buffer_pos) { + // Consume a space or a tab character. + if !leading_blanks { + whitespaces = parser.read(whitespaces) + } else { + parser.skip() + } + } else { + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = parser.readLine(leading_break) + leading_blanks = true + } else { + trailing_breaks = parser.readLine(trailing_breaks) + } + } + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Join the whitespaces or fold line breaks. 
+ if leading_blanks { + // Do we need to fold line breaks? + if len(leading_break) > 0 && leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Eat the right quote. + parser.skip() + end_mark := parser.mark + + // Create a token. + *token = Token{ + Type: SCALAR_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: s, + Style: SINGLE_QUOTED_SCALAR_STYLE, + } + if !single { + token.Style = DOUBLE_QUOTED_SCALAR_STYLE + } + return true +} + +// Scan a plain scalar. +func (parser *Parser) scanPlainScalar(token *Token) bool { + var s, leading_break, trailing_breaks, whitespaces []byte + var leading_blanks bool + indent := parser.indent + 1 + + start_mark := parser.mark + end_mark := parser.mark + + // Consume the content of the plain scalar. + for { + // Check for a document indicator. + if parser.unread < 4 && !parser.updateBuffer(4) { + return false + } + if parser.mark.Column == 0 && + ((parser.buffer[parser.buffer_pos+0] == '-' && + parser.buffer[parser.buffer_pos+1] == '-' && + parser.buffer[parser.buffer_pos+2] == '-') || + (parser.buffer[parser.buffer_pos+0] == '.' && + parser.buffer[parser.buffer_pos+1] == '.' && + parser.buffer[parser.buffer_pos+2] == '.')) && + isBlankOrZero(parser.buffer, parser.buffer_pos+3) { + break + } + + // Check for a comment. + if parser.buffer[parser.buffer_pos] == '#' { + break + } + + // Consume non-blank characters. + for !isBlankOrZero(parser.buffer, parser.buffer_pos) { + + // Check for indicators that may end a plain scalar. 
+ if (parser.buffer[parser.buffer_pos] == ':' && isBlankOrZero(parser.buffer, parser.buffer_pos+1)) || + (parser.flow_level > 0 && + (parser.buffer[parser.buffer_pos] == ',' || + (parser.buffer[parser.buffer_pos] == '?' && isBlankOrZero(parser.buffer, parser.buffer_pos+1)) || + parser.buffer[parser.buffer_pos] == '[' || + parser.buffer[parser.buffer_pos] == ']' || parser.buffer[parser.buffer_pos] == '{' || + parser.buffer[parser.buffer_pos] == '}')) { + break + } + + // Check if we need to join whitespaces and breaks. + if leading_blanks || len(whitespaces) > 0 { + if leading_blanks { + // Do we need to fold line breaks? + if leading_break[0] == '\n' { + if len(trailing_breaks) == 0 { + s = append(s, ' ') + } else { + s = append(s, trailing_breaks...) + } + } else { + s = append(s, leading_break...) + s = append(s, trailing_breaks...) + } + trailing_breaks = trailing_breaks[:0] + leading_break = leading_break[:0] + leading_blanks = false + } else { + s = append(s, whitespaces...) + whitespaces = whitespaces[:0] + } + } + + // Copy the character. + s = parser.read(s) + + end_mark = parser.mark + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + } + + // Is it the end? + if !(isBlank(parser.buffer, parser.buffer_pos) || isLineBreak(parser.buffer, parser.buffer_pos)) { + break + } + + // Consume blank characters. + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + + for isBlank(parser.buffer, parser.buffer_pos) || isLineBreak(parser.buffer, parser.buffer_pos) { + if isBlank(parser.buffer, parser.buffer_pos) { + + // Check for tab characters that abuse indentation. + if leading_blanks && parser.mark.Column < indent && isTab(parser.buffer, parser.buffer_pos) { + parser.setScannerError("while scanning a plain scalar", + start_mark, "found a tab character that violates indentation") + return false + } + + // Consume a space or a tab character. 
+ if !leading_blanks { + whitespaces = parser.read(whitespaces) + } else { + parser.skip() + } + } else { + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + + // Check if it is a first line break. + if !leading_blanks { + whitespaces = whitespaces[:0] + leading_break = parser.readLine(leading_break) + leading_blanks = true + } else { + trailing_breaks = parser.readLine(trailing_breaks) + } + } + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + } + + // Check indentation level. + if parser.flow_level == 0 && parser.mark.Column < indent { + break + } + } + + // Create a token. + *token = Token{ + Type: SCALAR_TOKEN, + StartMark: start_mark, + EndMark: end_mark, + Value: s, + Style: PLAIN_SCALAR_STYLE, + } + + // Note that we change the 'simple_key_allowed' flag. + if leading_blanks { + parser.simple_key_allowed = true + } + return true +} + +func (parser *Parser) scanLineComment(token_mark Mark) bool { + if parser.newlines > 0 { + return true + } + + var start_mark Mark + var text []byte + + for peek := 0; peek < 512; peek++ { + if parser.unread < peek+1 && !parser.updateBuffer(peek+1) { + break + } + if isBlank(parser.buffer, parser.buffer_pos+peek) { + continue + } + if parser.buffer[parser.buffer_pos+peek] == '#' { + seen := parser.mark.Index + peek + for { + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + if isBreakOrZero(parser.buffer, parser.buffer_pos) { + if parser.mark.Index >= seen { + break + } + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + parser.skipLine() + } else if parser.mark.Index >= seen { + if len(text) == 0 { + start_mark = parser.mark + } + text = parser.read(text) + } else { + parser.skip() + } + } + } + break + } + if len(text) > 0 { + parser.comments = append(parser.comments, Comment{ + token_mark: token_mark, + start_mark: start_mark, + line: text, + }) + } + return true +} + +func (parser *Parser) scanComments(scan_mark Mark) bool { + token := 
parser.tokens[len(parser.tokens)-1] + + if token.Type == FLOW_ENTRY_TOKEN && len(parser.tokens) > 1 { + token = parser.tokens[len(parser.tokens)-2] + } + + token_mark := token.StartMark + var start_mark Mark + next_indent := parser.indent + if next_indent < 0 { + next_indent = 0 + } + + recent_empty := false + first_empty := parser.newlines <= 1 + + line := parser.mark.Line + column := parser.mark.Column + + var text []byte + + // The foot line is the place where a comment must start to + // still be considered as a foot of the prior content. + // If there's some content in the currently parsed line, then + // the foot is the line below it. + foot_line := -1 + if scan_mark.Line > 0 { + foot_line = parser.mark.Line - parser.newlines + 1 + if parser.newlines == 0 && parser.mark.Column > 1 { + foot_line++ + } + } + + peek := 0 + for ; peek < 512; peek++ { + if parser.unread < peek+1 && !parser.updateBuffer(peek+1) { + break + } + column++ + if isBlank(parser.buffer, parser.buffer_pos+peek) { + continue + } + c := parser.buffer[parser.buffer_pos+peek] + close_flow := parser.flow_level > 0 && (c == ']' || c == '}') + if close_flow || isBreakOrZero(parser.buffer, parser.buffer_pos+peek) { + // Got line break or terminator. + if close_flow || !recent_empty { + if close_flow || first_empty && (start_mark.Line == foot_line && token.Type != VALUE_TOKEN || start_mark.Column-1 < next_indent) { + // This is the first empty line and there were no empty lines before, + // so this initial part of the comment is a foot of the prior token + // instead of being a head for the following one. Split it up. + // Alternatively, this might also be the last comment inside a flow + // scope, so it must be a footer. + if len(text) > 0 { + if start_mark.Column-1 < next_indent { + // If dedented it's unrelated to the prior token. 
+ token_mark = start_mark + } + parser.comments = append(parser.comments, Comment{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: Mark{parser.mark.Index + peek, line, column}, + foot: text, + }) + scan_mark = Mark{parser.mark.Index + peek, line, column} + token_mark = scan_mark + text = nil + } + } else { + if len(text) > 0 && parser.buffer[parser.buffer_pos+peek] != 0 { + text = append(text, '\n') + } + } + } + if !isLineBreak(parser.buffer, parser.buffer_pos+peek) { + break + } + first_empty = false + recent_empty = true + column = 0 + line++ + continue + } + + if len(text) > 0 && (close_flow || column-1 < next_indent && column != start_mark.Column) { + // The comment at the different indentation is a foot of the + // preceding data rather than a head of the upcoming one. + parser.comments = append(parser.comments, Comment{ + scan_mark: scan_mark, + token_mark: token_mark, + start_mark: start_mark, + end_mark: Mark{parser.mark.Index + peek, line, column}, + foot: text, + }) + scan_mark = Mark{parser.mark.Index + peek, line, column} + token_mark = scan_mark + text = nil + } + + if parser.buffer[parser.buffer_pos+peek] != '#' { + break + } + + if len(text) == 0 { + start_mark = Mark{parser.mark.Index + peek, line, column} + } else { + text = append(text, '\n') + } + + recent_empty = false + + // Consume until after the consumed comment line. 
+ seen := parser.mark.Index + peek + for { + if parser.unread < 1 && !parser.updateBuffer(1) { + return false + } + if isBreakOrZero(parser.buffer, parser.buffer_pos) { + if parser.mark.Index >= seen { + break + } + if parser.unread < 2 && !parser.updateBuffer(2) { + return false + } + parser.skipLine() + } else if parser.mark.Index >= seen { + text = parser.read(text) + } else { + parser.skip() + } + } + + peek = 0 + column = 0 + line = parser.mark.Line + next_indent = parser.indent + if next_indent < 0 { + next_indent = 0 + } + } + + if len(text) > 0 { + parser.comments = append(parser.comments, Comment{ + scan_mark: scan_mark, + token_mark: start_mark, + start_mark: start_mark, + end_mark: Mark{parser.mark.Index + peek - 1, line, column}, + head: text, + }) + } + return true +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/writer.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/writer.go new file mode 100644 index 00000000..17d56959 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/writer.go @@ -0,0 +1,43 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package libyaml + +import "fmt" + +// Flush the output buffer. +func (emitter *Emitter) flush() error { + if emitter.write_handler == nil { + panic("write handler not set") + } + + // Check if the buffer is empty. + if emitter.buffer_pos == 0 { + return nil + } + + if err := emitter.write_handler(emitter, emitter.buffer[:emitter.buffer_pos]); err != nil { + return fmt.Errorf("write error: %w", err) + } + emitter.buffer_pos = 0 + return nil +} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/yaml.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yaml.go new file mode 100644 index 00000000..50d1d588 --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yaml.go @@ -0,0 +1,804 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package libyaml + +import ( + "fmt" + "io" +) + +// VersionDirective holds the YAML version directive data. +type VersionDirective struct { + major int8 // The major version number. + minor int8 // The minor version number. +} + +// TagDirective holds the YAML tag directive data. +type TagDirective struct { + handle []byte // The tag handle. + prefix []byte // The tag prefix. +} + +type Encoding int + +// The stream encoding. +const ( + // Let the parser choose the encoding. + ANY_ENCODING Encoding = iota + + UTF8_ENCODING // The default UTF-8 encoding. + UTF16LE_ENCODING // The UTF-16-LE encoding with BOM. + UTF16BE_ENCODING // The UTF-16-BE encoding with BOM. +) + +type LineBreak int + +// Line break types. +const ( + // Let the parser choose the break type. + ANY_BREAK LineBreak = iota + + CR_BREAK // Use CR for line breaks (Mac style). + LN_BREAK // Use LN for line breaks (Unix style). + CRLN_BREAK // Use CR LN for line breaks (DOS style). +) + +type ErrorType int + +// Many bad things could happen with the parser and emitter. +const ( + // No error is produced. + NO_ERROR ErrorType = iota + + MEMORY_ERROR // Cannot allocate or reallocate a block of memory. + READER_ERROR // Cannot read or decode the input stream. + SCANNER_ERROR // Cannot scan the input stream. + PARSER_ERROR // Cannot parse the input stream. + COMPOSER_ERROR // Cannot compose a YAML document. + WRITER_ERROR // Cannot write to the output stream. + EMITTER_ERROR // Cannot emit a YAML stream. +) + +// Mark holds the pointer position. +type Mark struct { + Index int // The position index. + Line int // The position line. + Column int // The position column. 
+} + +// Node Styles + +type Style int8 + +type ScalarStyle Style + +// Scalar styles. +const ( + // Let the emitter choose the style. + ANY_SCALAR_STYLE ScalarStyle = 0 + + PLAIN_SCALAR_STYLE ScalarStyle = 1 << iota // The plain scalar style. + SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. + DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. + LITERAL_SCALAR_STYLE // The literal scalar style. + FOLDED_SCALAR_STYLE // The folded scalar style. +) + +// String returns a string representation of a [ScalarStyle]. +func (style ScalarStyle) String() string { + switch style { + case PLAIN_SCALAR_STYLE: + return "Plain" + case SINGLE_QUOTED_SCALAR_STYLE: + return "Single" + case DOUBLE_QUOTED_SCALAR_STYLE: + return "Double" + case LITERAL_SCALAR_STYLE: + return "Literal" + case FOLDED_SCALAR_STYLE: + return "Folded" + default: + return "" + } +} + +type SequenceStyle Style + +// Sequence styles. +const ( + // Let the emitter choose the style. + ANY_SEQUENCE_STYLE SequenceStyle = iota + + BLOCK_SEQUENCE_STYLE // The block sequence style. + FLOW_SEQUENCE_STYLE // The flow sequence style. +) + +type MappingStyle Style + +// Mapping styles. +const ( + // Let the emitter choose the style. + ANY_MAPPING_STYLE MappingStyle = iota + + BLOCK_MAPPING_STYLE // The block mapping style. + FLOW_MAPPING_STYLE // The flow mapping style. +) + +// Tokens + +type TokenType int + +// Token types. +const ( + // An empty token. + NO_TOKEN TokenType = iota + + STREAM_START_TOKEN // A STREAM-START token. + STREAM_END_TOKEN // A STREAM-END token. + + VERSION_DIRECTIVE_TOKEN // A VERSION-DIRECTIVE token. + TAG_DIRECTIVE_TOKEN // A TAG-DIRECTIVE token. + DOCUMENT_START_TOKEN // A DOCUMENT-START token. + DOCUMENT_END_TOKEN // A DOCUMENT-END token. + + BLOCK_SEQUENCE_START_TOKEN // A BLOCK-SEQUENCE-START token. + BLOCK_MAPPING_START_TOKEN // A BLOCK-SEQUENCE-END token. + BLOCK_END_TOKEN // A BLOCK-END token. + + FLOW_SEQUENCE_START_TOKEN // A FLOW-SEQUENCE-START token. 
+ FLOW_SEQUENCE_END_TOKEN // A FLOW-SEQUENCE-END token. + FLOW_MAPPING_START_TOKEN // A FLOW-MAPPING-START token. + FLOW_MAPPING_END_TOKEN // A FLOW-MAPPING-END token. + + BLOCK_ENTRY_TOKEN // A BLOCK-ENTRY token. + FLOW_ENTRY_TOKEN // A FLOW-ENTRY token. + KEY_TOKEN // A KEY token. + VALUE_TOKEN // A VALUE token. + + ALIAS_TOKEN // An ALIAS token. + ANCHOR_TOKEN // An ANCHOR token. + TAG_TOKEN // A TAG token. + SCALAR_TOKEN // A SCALAR token. +) + +func (tt TokenType) String() string { + switch tt { + case NO_TOKEN: + return "NO_TOKEN" + case STREAM_START_TOKEN: + return "STREAM_START_TOKEN" + case STREAM_END_TOKEN: + return "STREAM_END_TOKEN" + case VERSION_DIRECTIVE_TOKEN: + return "VERSION_DIRECTIVE_TOKEN" + case TAG_DIRECTIVE_TOKEN: + return "TAG_DIRECTIVE_TOKEN" + case DOCUMENT_START_TOKEN: + return "DOCUMENT_START_TOKEN" + case DOCUMENT_END_TOKEN: + return "DOCUMENT_END_TOKEN" + case BLOCK_SEQUENCE_START_TOKEN: + return "BLOCK_SEQUENCE_START_TOKEN" + case BLOCK_MAPPING_START_TOKEN: + return "BLOCK_MAPPING_START_TOKEN" + case BLOCK_END_TOKEN: + return "BLOCK_END_TOKEN" + case FLOW_SEQUENCE_START_TOKEN: + return "FLOW_SEQUENCE_START_TOKEN" + case FLOW_SEQUENCE_END_TOKEN: + return "FLOW_SEQUENCE_END_TOKEN" + case FLOW_MAPPING_START_TOKEN: + return "FLOW_MAPPING_START_TOKEN" + case FLOW_MAPPING_END_TOKEN: + return "FLOW_MAPPING_END_TOKEN" + case BLOCK_ENTRY_TOKEN: + return "BLOCK_ENTRY_TOKEN" + case FLOW_ENTRY_TOKEN: + return "FLOW_ENTRY_TOKEN" + case KEY_TOKEN: + return "KEY_TOKEN" + case VALUE_TOKEN: + return "VALUE_TOKEN" + case ALIAS_TOKEN: + return "ALIAS_TOKEN" + case ANCHOR_TOKEN: + return "ANCHOR_TOKEN" + case TAG_TOKEN: + return "TAG_TOKEN" + case SCALAR_TOKEN: + return "SCALAR_TOKEN" + } + return "" +} + +// Token holds information about a scanning token. +type Token struct { + // The token type. + Type TokenType + + // The start/end of the token. + StartMark, EndMark Mark + + // The stream encoding (for STREAM_START_TOKEN). 
+ encoding Encoding + + // The alias/anchor/scalar Value or tag/tag directive handle + // (for ALIAS_TOKEN, ANCHOR_TOKEN, SCALAR_TOKEN, TAG_TOKEN, TAG_DIRECTIVE_TOKEN). + Value []byte + + // The tag suffix (for TAG_TOKEN). + suffix []byte + + // The tag directive prefix (for TAG_DIRECTIVE_TOKEN). + prefix []byte + + // The scalar Style (for SCALAR_TOKEN). + Style ScalarStyle + + // The version directive major/minor (for VERSION_DIRECTIVE_TOKEN). + major, minor int8 +} + +// Events + +type EventType int8 + +// Event types. +const ( + // An empty event. + NO_EVENT EventType = iota + + STREAM_START_EVENT // A STREAM-START event. + STREAM_END_EVENT // A STREAM-END event. + DOCUMENT_START_EVENT // A DOCUMENT-START event. + DOCUMENT_END_EVENT // A DOCUMENT-END event. + ALIAS_EVENT // An ALIAS event. + SCALAR_EVENT // A SCALAR event. + SEQUENCE_START_EVENT // A SEQUENCE-START event. + SEQUENCE_END_EVENT // A SEQUENCE-END event. + MAPPING_START_EVENT // A MAPPING-START event. + MAPPING_END_EVENT // A MAPPING-END event. + TAIL_COMMENT_EVENT +) + +var eventStrings = []string{ + NO_EVENT: "none", + STREAM_START_EVENT: "stream start", + STREAM_END_EVENT: "stream end", + DOCUMENT_START_EVENT: "document start", + DOCUMENT_END_EVENT: "document end", + ALIAS_EVENT: "alias", + SCALAR_EVENT: "scalar", + SEQUENCE_START_EVENT: "sequence start", + SEQUENCE_END_EVENT: "sequence end", + MAPPING_START_EVENT: "mapping start", + MAPPING_END_EVENT: "mapping end", + TAIL_COMMENT_EVENT: "tail comment", +} + +func (e EventType) String() string { + if e < 0 || int(e) >= len(eventStrings) { + return fmt.Sprintf("unknown event %d", e) + } + return eventStrings[e] +} + +// Event holds information about a parsing or emitting event. +type Event struct { + // The event type. + Type EventType + + // The start and end of the event. + StartMark, EndMark Mark + + // The document encoding (for STREAM_START_EVENT). + encoding Encoding + + // The version directive (for DOCUMENT_START_EVENT). 
+ version_directive *VersionDirective + + // The list of tag directives (for DOCUMENT_START_EVENT). + tag_directives []TagDirective + + // The comments + HeadComment []byte + LineComment []byte + FootComment []byte + TailComment []byte + + // The Anchor (for SCALAR_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT, ALIAS_EVENT). + Anchor []byte + + // The Tag (for SCALAR_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT). + Tag []byte + + // The scalar Value (for SCALAR_EVENT). + Value []byte + + // Is the document start/end indicator Implicit, or the tag optional? + // (for DOCUMENT_START_EVENT, DOCUMENT_END_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT, SCALAR_EVENT). + Implicit bool + + // Is the tag optional for any non-plain style? (for SCALAR_EVENT). + quoted_implicit bool + + // The Style (for SCALAR_EVENT, SEQUENCE_START_EVENT, MAPPING_START_EVENT). + Style Style +} + +func (e *Event) ScalarStyle() ScalarStyle { return ScalarStyle(e.Style) } +func (e *Event) SequenceStyle() SequenceStyle { return SequenceStyle(e.Style) } +func (e *Event) MappingStyle() MappingStyle { return MappingStyle(e.Style) } + +// Nodes + +const ( + NULL_TAG = "tag:yaml.org,2002:null" // The tag !!null with the only possible value: null. + BOOL_TAG = "tag:yaml.org,2002:bool" // The tag !!bool with the values: true and false. + STR_TAG = "tag:yaml.org,2002:str" // The tag !!str for string values. + INT_TAG = "tag:yaml.org,2002:int" // The tag !!int for integer values. + FLOAT_TAG = "tag:yaml.org,2002:float" // The tag !!float for float values. + TIMESTAMP_TAG = "tag:yaml.org,2002:timestamp" // The tag !!timestamp for date and time values. + + SEQ_TAG = "tag:yaml.org,2002:seq" // The tag !!seq is used to denote sequences. + MAP_TAG = "tag:yaml.org,2002:map" // The tag !!map is used to denote mapping. + + // Not in original libyaml. + BINARY_TAG = "tag:yaml.org,2002:binary" + MERGE_TAG = "tag:yaml.org,2002:merge" + + DEFAULT_SCALAR_TAG = STR_TAG // The default scalar tag is !!str. 
+ DEFAULT_SEQUENCE_TAG = SEQ_TAG // The default sequence tag is !!seq. + DEFAULT_MAPPING_TAG = MAP_TAG // The default mapping tag is !!map. +) + +type NodeType int + +// Node types. +const ( + // An empty node. + NO_NODE NodeType = iota + + SCALAR_NODE // A scalar node. + SEQUENCE_NODE // A sequence node. + MAPPING_NODE // A mapping node. +) + +// NodeItem represents an element of a sequence node. +type NodeItem int + +// NodePair represents an element of a mapping node. +type NodePair struct { + key int // The key of the element. + value int // The value of the element. +} + +// Node represents a single node in the YAML document tree. +type Node struct { + typ NodeType // The node type. + tag []byte // The node tag. + + // The node data. + + // The scalar parameters (for SCALAR_NODE). + scalar struct { + value []byte // The scalar value. + length int // The length of the scalar value. + style ScalarStyle // The scalar style. + } + + // The sequence parameters (for YAML_SEQUENCE_NODE). + sequence struct { + items_data []NodeItem // The stack of sequence items. + style SequenceStyle // The sequence style. + } + + // The mapping parameters (for MAPPING_NODE). + mapping struct { + pairs_data []NodePair // The stack of mapping pairs (key, value). + pairs_start *NodePair // The beginning of the stack. + pairs_end *NodePair // The end of the stack. + pairs_top *NodePair // The top of the stack. + style MappingStyle // The mapping style. + } + + start_mark Mark // The beginning of the node. + end_mark Mark // The end of the node. +} + +// Document structure. +type Document struct { + // The document nodes. + nodes []Node + + // The version directive. + version_directive *VersionDirective + + // The list of tag directives. + tag_directives_data []TagDirective + tag_directives_start int // The beginning of the tag directives list. + tag_directives_end int // The end of the tag directives list. + + start_implicit int // Is the document start indicator implicit? 
+ end_implicit int // Is the document end indicator implicit? + + // The start/end of the document. + start_mark, end_mark Mark +} + +// ReadHandler is called when the [Parser] needs to read more bytes from the +// source. The handler should write not more than size bytes to the buffer. +// The number of written bytes should be set to the size_read variable. +// +// [in,out] data A pointer to an application data specified by +// +// yamlParser.setInput(). +// +// [out] buffer The buffer to write the data from the source. +// [in] size The size of the buffer. +// [out] size_read The actual number of bytes read from the source. +// +// On success, the handler should return 1. If the handler failed, +// the returned value should be 0. On EOF, the handler should set the +// size_read to 0 and return 1. +type ReadHandler func(parser *Parser, buffer []byte) (n int, err error) + +// SimpleKey holds information about a potential simple key. +type SimpleKey struct { + possible bool // Is a simple key possible? + required bool // Is a simple key required? + token_number int // The number of the token. + mark Mark // The position mark. +} + +// ParserState represents the state of the parser. +type ParserState int + +const ( + PARSE_STREAM_START_STATE ParserState = iota + + PARSE_IMPLICIT_DOCUMENT_START_STATE // Expect the beginning of an implicit document. + PARSE_DOCUMENT_START_STATE // Expect DOCUMENT-START. + PARSE_DOCUMENT_CONTENT_STATE // Expect the content of a document. + PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-END. + PARSE_BLOCK_NODE_STATE // Expect a block node. + PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a block sequence. + PARSE_BLOCK_SEQUENCE_ENTRY_STATE // Expect an entry of a block sequence. + PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE // Expect an entry of an indentless sequence. + PARSE_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + PARSE_BLOCK_MAPPING_KEY_STATE // Expect a block mapping key. 
+ PARSE_BLOCK_MAPPING_VALUE_STATE // Expect a block mapping value. + PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE // Expect the first entry of a flow sequence. + PARSE_FLOW_SEQUENCE_ENTRY_STATE // Expect an entry of a flow sequence. + PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE // Expect a key of an ordered mapping. + PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE // Expect a value of an ordered mapping. + PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE // Expect the and of an ordered mapping entry. + PARSE_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + PARSE_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + PARSE_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE // Expect an empty value of a flow mapping. + PARSE_END_STATE // Expect nothing. +) + +func (ps ParserState) String() string { + switch ps { + case PARSE_STREAM_START_STATE: + return "PARSE_STREAM_START_STATE" + case PARSE_IMPLICIT_DOCUMENT_START_STATE: + return "PARSE_IMPLICIT_DOCUMENT_START_STATE" + case PARSE_DOCUMENT_START_STATE: + return "PARSE_DOCUMENT_START_STATE" + case PARSE_DOCUMENT_CONTENT_STATE: + return "PARSE_DOCUMENT_CONTENT_STATE" + case PARSE_DOCUMENT_END_STATE: + return "PARSE_DOCUMENT_END_STATE" + case PARSE_BLOCK_NODE_STATE: + return "PARSE_BLOCK_NODE_STATE" + case PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE: + return "PARSE_BLOCK_SEQUENCE_FIRST_ENTRY_STATE" + case PARSE_BLOCK_SEQUENCE_ENTRY_STATE: + return "PARSE_BLOCK_SEQUENCE_ENTRY_STATE" + case PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE: + return "PARSE_INDENTLESS_SEQUENCE_ENTRY_STATE" + case PARSE_BLOCK_MAPPING_FIRST_KEY_STATE: + return "PARSE_BLOCK_MAPPING_FIRST_KEY_STATE" + case PARSE_BLOCK_MAPPING_KEY_STATE: + return "PARSE_BLOCK_MAPPING_KEY_STATE" + case PARSE_BLOCK_MAPPING_VALUE_STATE: + return "PARSE_BLOCK_MAPPING_VALUE_STATE" + case PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE: + return "PARSE_FLOW_SEQUENCE_FIRST_ENTRY_STATE" + case PARSE_FLOW_SEQUENCE_ENTRY_STATE: + 
return "PARSE_FLOW_SEQUENCE_ENTRY_STATE" + case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE: + return "PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_KEY_STATE" + case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE: + return "PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_VALUE_STATE" + case PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE: + return "PARSE_FLOW_SEQUENCE_ENTRY_MAPPING_END_STATE" + case PARSE_FLOW_MAPPING_FIRST_KEY_STATE: + return "PARSE_FLOW_MAPPING_FIRST_KEY_STATE" + case PARSE_FLOW_MAPPING_KEY_STATE: + return "PARSE_FLOW_MAPPING_KEY_STATE" + case PARSE_FLOW_MAPPING_VALUE_STATE: + return "PARSE_FLOW_MAPPING_VALUE_STATE" + case PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE: + return "PARSE_FLOW_MAPPING_EMPTY_VALUE_STATE" + case PARSE_END_STATE: + return "PARSE_END_STATE" + } + return "" +} + +// AliasData holds information about aliases. +type AliasData struct { + anchor []byte // The anchor. + index int // The node id. + mark Mark // The anchor mark. +} + +// Parser structure holds all information about the current +// state of the parser. +type Parser struct { + // Error handling + + ErrorType ErrorType // Error type. + + Problem string // Error description. + + // The byte about which the problem occurred. + ProblemOffset int + ProblemValue int + ProblemMark Mark + + // The error Context. + Context string + ContextMark Mark + + // Reader stuff + + read_handler ReadHandler // Read handler. + + input_reader io.Reader // File input data. + input []byte // String input data. + input_pos int + + eof bool // EOF flag + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + unread int // The number of unread characters in the buffer. + + newlines int // The number of line breaks since last non-break/non-blank character + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding Encoding // The input encoding. + + offset int // The offset of the current position (in bytes). 
+ mark Mark // The mark of the current position. + + // Comments + + HeadComment []byte // The current head comments + LineComment []byte // The current line comments + FootComment []byte // The current foot comments + tail_comment []byte // Foot comment that happens at the end of a block. + stem_comment []byte // Comment in item preceding a nested structure (list inside list item, etc) + + comments []Comment // The folded comments for all parsed tokens + comments_head int + + // Scanner stuff + + stream_start_produced bool // Have we started to scan the input stream? + stream_end_produced bool // Have we reached the end of the input stream? + + flow_level int // The number of unclosed '[' and '{' indicators. + + tokens []Token // The tokens queue. + tokens_head int // The head of the tokens queue. + tokens_parsed int // The number of tokens fetched from the queue. + token_available bool // Does the tokens queue contain a token ready for dequeueing. + + indent int // The current indentation level. + indents []int // The indentation levels stack. + + simple_key_allowed bool // May a simple key occur at the current position? + simple_keys []SimpleKey // The stack of simple keys. + simple_keys_by_tok map[int]int // possible simple_key indexes indexed by token_number + + // Parser stuff + + state ParserState // The current parser state. + states []ParserState // The parser states stack. + marks []Mark // The stack of marks. + tag_directives []TagDirective // The list of TAG directives. + + // Dumper stuff + + aliases []AliasData // The alias data. + + document *Document // The currently parsed document. 
+} + +type Comment struct { + scan_mark Mark // Position where scanning for comments started + token_mark Mark // Position after which tokens will be associated with this comment + start_mark Mark // Position of '#' comment mark + end_mark Mark // Position where comment terminated + + head []byte + line []byte + foot []byte +} + +// Emitter Definitions + +// WriteHandler is called when the [Emitter] needs to flush the accumulated +// characters to the output. The handler should write @a size bytes of the +// @a buffer to the output. +// +// @param[in,out] data A pointer to an application data specified by +// +// yamlEmitter.setOutput(). +// +// @param[in] buffer The buffer with bytes to be written. +// @param[in] size The size of the buffer. +// +// @returns On success, the handler should return @c 1. If the handler failed, +// the returned value should be @c 0. +type WriteHandler func(emitter *Emitter, buffer []byte) error + +type EmitterState int + +// The emitter states. +const ( + // Expect STREAM-START. + EMIT_STREAM_START_STATE EmitterState = iota + + EMIT_FIRST_DOCUMENT_START_STATE // Expect the first DOCUMENT-START or STREAM-END. + EMIT_DOCUMENT_START_STATE // Expect DOCUMENT-START or STREAM-END. + EMIT_DOCUMENT_CONTENT_STATE // Expect the content of a document. + EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. + EMIT_FLOW_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a flow sequence. + EMIT_FLOW_SEQUENCE_TRAIL_ITEM_STATE // Expect the next item of a flow sequence, with the comma already written out + EMIT_FLOW_SEQUENCE_ITEM_STATE // Expect an item of a flow sequence. + EMIT_FLOW_MAPPING_FIRST_KEY_STATE // Expect the first key of a flow mapping. + EMIT_FLOW_MAPPING_TRAIL_KEY_STATE // Expect the next key of a flow mapping, with the comma already written out + EMIT_FLOW_MAPPING_KEY_STATE // Expect a key of a flow mapping. + EMIT_FLOW_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a flow mapping. 
+ EMIT_FLOW_MAPPING_VALUE_STATE // Expect a value of a flow mapping. + EMIT_BLOCK_SEQUENCE_FIRST_ITEM_STATE // Expect the first item of a block sequence. + EMIT_BLOCK_SEQUENCE_ITEM_STATE // Expect an item of a block sequence. + EMIT_BLOCK_MAPPING_FIRST_KEY_STATE // Expect the first key of a block mapping. + EMIT_BLOCK_MAPPING_KEY_STATE // Expect the key of a block mapping. + EMIT_BLOCK_MAPPING_SIMPLE_VALUE_STATE // Expect a value for a simple key of a block mapping. + EMIT_BLOCK_MAPPING_VALUE_STATE // Expect a value of a block mapping. + EMIT_END_STATE // Expect nothing. +) + +// Emitter holds all information about the current state of the emitter. +type Emitter struct { + // Writer stuff + + write_handler WriteHandler // Write handler. + + output_buffer *[]byte // String output data. + output_writer io.Writer // File output data. + + buffer []byte // The working buffer. + buffer_pos int // The current position of the buffer. + + raw_buffer []byte // The raw buffer. + raw_buffer_pos int // The current position of the buffer. + + encoding Encoding // The stream encoding. + + // Emitter stuff + + canonical bool // If the output is in the canonical style? + BestIndent int // The number of indentation spaces. + best_width int // The preferred width of the output lines. + unicode bool // Allow unescaped non-ASCII characters? + line_break LineBreak // The preferred line break. + + state EmitterState // The current emitter state. + states []EmitterState // The stack of states. + + events []Event // The event queue. + events_head int // The head of the event queue. + + indents []int // The stack of indentation levels. + + tag_directives []TagDirective // The list of tag directives. + + indent int // The current indentation level. + + CompactSequenceIndent bool // Is '- ' is considered part of the indentation for sequence elements? + + flow_level int // The current flow level. + + root_context bool // Is it the document root context? 
+ sequence_context bool // Is it a sequence context? + mapping_context bool // Is it a mapping context? + simple_key_context bool // Is it a simple mapping key context? + + line int // The current line. + column int // The current column. + whitespace bool // If the last character was a whitespace? + indention bool // If the last character was an indentation character (' ', '-', '?', ':')? + OpenEnded bool // If an explicit document end is required? + + space_above bool // Is there's an empty line above? + foot_indent int // The indent used to write the foot comment above, or -1 if none. + + // Anchor analysis. + anchor_data struct { + anchor []byte // The anchor value. + alias bool // Is it an alias? + } + + // Tag analysis. + tag_data struct { + handle []byte // The tag handle. + suffix []byte // The tag suffix. + } + + // Scalar analysis. + scalar_data struct { + value []byte // The scalar value. + multiline bool // Does the scalar contain line breaks? + flow_plain_allowed bool // Can the scalar be expressed in the flow plain style? + block_plain_allowed bool // Can the scalar be expressed in the block plain style? + single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? + block_allowed bool // Can the scalar be expressed in the literal or folded styles? + style ScalarStyle // The output style. + } + + // Comments + HeadComment []byte + LineComment []byte + FootComment []byte + TailComment []byte + + key_line_comment []byte + + // Dumper stuff + + opened bool // If the stream was already opened? + closed bool // If the stream was already closed? + + // The information associated with the document nodes. + anchors *struct { + references int // The number of references. + anchor int // The anchor id. + serialized bool // If the node has been emitted? + } + + last_anchor_id int // The last assigned anchor id. + + document *Document // The currently emitted document. 
+} diff --git a/vendor/go.yaml.in/yaml/v4/internal/libyaml/yamlprivate.go b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yamlprivate.go new file mode 100644 index 00000000..04acf12d --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/internal/libyaml/yamlprivate.go @@ -0,0 +1,241 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// Copyright (c) 2006-2010 Kirill Simonov +// +// Permission is hereby granted, free of charge, to any person obtaining a copy of +// this software and associated documentation files (the "Software"), to deal in +// the Software without restriction, including without limitation the rights to +// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies +// of the Software, and to permit persons to whom the Software is furnished to do +// so, subject to the following conditions: +// +// The above copyright notice and this permission notice shall be included in all +// copies or substantial portions of the Software. +// +// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +// SOFTWARE. + +package libyaml + +const ( + // The size of the input raw buffer. + input_raw_buffer_size = 512 + + // The size of the input buffer. + // It should be possible to decode the whole raw buffer. + input_buffer_size = input_raw_buffer_size * 3 + + // The size of the output buffer. + output_buffer_size = 128 + + // The size of the output raw buffer. + // It should be possible to encode the whole output buffer. + output_raw_buffer_size = (output_buffer_size*2 + 2) + + // The size of other stacks and queues. 
+ initial_stack_size = 16 + initial_queue_size = 16 + initial_string_size = 16 +) + +// Check if the character at the specified position is an alphabetical +// character, a digit, '_', or '-'. +func isAlpha(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'Z' || + b[i] >= 'a' && b[i] <= 'z' || b[i] == '_' || b[i] == '-' +} + +// Check if the character at the specified position is a flow indicator as +// defined by spec production [23] c-flow-indicator ::= +// c-collect-entry | c-sequence-start | c-sequence-end | +// c-mapping-start | c-mapping-end +func isFlowIndicator(b []byte, i int) bool { + return b[i] == '[' || b[i] == ']' || + b[i] == '{' || b[i] == '}' || b[i] == ',' +} + +// Check if the character at the specified position is valid for anchor names +// as defined by spec production [102] ns-anchor-char ::= ns-char - +// c-flow-indicator. +// This includes all printable characters except: CR, LF, BOM, space, tab, '[', +// ']', '{', '}', ','. +// We further limit it to ascii chars only, which is a subset of the spec +// production but is usually what most people expect. +func isAnchorChar(b []byte, i int) bool { + if isColon(b, i) { + // [Go] we exclude colons from anchor/alias names. + // + // A colon is a valid anchor character according to the YAML 1.2 specification, + // but it can lead to ambiguity. + // https://github.com/yaml/go-yaml/issues/109 + // + // Also, it would have been a breaking change to support it, as go.yaml.in/yaml/v3 ignores it. + // Supporting it could lead to unexpected behavior. + return false + } + + return isPrintable(b, i) && + !isLineBreak(b, i) && + !isBlank(b, i) && + !isBOM(b, i) && + !isFlowIndicator(b, i) && + isASCII(b, i) +} + +// isColon checks whether the character at the specified position is a colon. +func isColon(b []byte, i int) bool { + return b[i] == ':' +} + +// Check if the character at the specified position is a digit. 
+func isDigit(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' +} + +// Get the value of a digit. +func asDigit(b []byte, i int) int { + return int(b[i]) - '0' +} + +// Check if the character at the specified position is a hex-digit. +func isHex(b []byte, i int) bool { + return b[i] >= '0' && b[i] <= '9' || b[i] >= 'A' && b[i] <= 'F' || + b[i] >= 'a' && b[i] <= 'f' +} + +// Get the value of a hex-digit. +func asHex(b []byte, i int) int { + bi := b[i] + if bi >= 'A' && bi <= 'F' { + return int(bi) - 'A' + 10 + } + if bi >= 'a' && bi <= 'f' { + return int(bi) - 'a' + 10 + } + return int(bi) - '0' +} + +// Check if the character is ASCII. +func isASCII(b []byte, i int) bool { + return b[i] <= 0x7F +} + +// Check if the character at the start of the buffer can be printed unescaped. +func isPrintable(b []byte, i int) bool { + return ((b[i] == 0x0A) || // . == #x0A + (b[i] >= 0x20 && b[i] <= 0x7E) || // #x20 <= . <= #x7E + (b[i] == 0xC2 && b[i+1] >= 0xA0) || // #0xA0 <= . <= #xD7FF + (b[i] > 0xC2 && b[i] < 0xED) || + (b[i] == 0xED && b[i+1] < 0xA0) || + (b[i] == 0xEE) || + (b[i] == 0xEF && // #xE000 <= . <= #xFFFD + !(b[i+1] == 0xBB && b[i+2] == 0xBF) && // && . != #xFEFF + !(b[i+1] == 0xBF && (b[i+2] == 0xBE || b[i+2] == 0xBF)))) +} + +// Check if the character at the specified position is NUL. +func isZeroChar(b []byte, i int) bool { + return b[i] == 0x00 +} + +// Check if the beginning of the buffer is a BOM. +func isBOM(b []byte, i int) bool { + return b[0] == 0xEF && b[1] == 0xBB && b[2] == 0xBF +} + +// Check if the character at the specified position is space. +func isSpace(b []byte, i int) bool { + return b[i] == ' ' +} + +// Check if the character at the specified position is tab. +func isTab(b []byte, i int) bool { + return b[i] == '\t' +} + +// Check if the character at the specified position is blank (space or tab). 
+func isBlank(b []byte, i int) bool { + // return isSpace(b, i) || isTab(b, i) + return b[i] == ' ' || b[i] == '\t' +} + +// Check if the character at the specified position is a line break. +func isLineBreak(b []byte, i int) bool { + return (b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9) // PS (#x2029) +} + +func isCRLF(b []byte, i int) bool { + return b[i] == '\r' && b[i+1] == '\n' +} + +// Check if the character is a line break or NUL. +func isBreakOrZero(b []byte, i int) bool { + // return isLineBreak(b, i) || isZeroChar(b, i) + return ( + // isBreak: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + // isZeroChar: + b[i] == 0) +} + +// Check if the character is a line break, space, or NUL. +func isSpaceOrZero(b []byte, i int) bool { + // return isSpace(b, i) || isBreakOrZero(b, i) + return ( + // isSpace: + b[i] == ' ' || + // isBreakOrZero: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Check if the character is a line break, space, tab, or NUL. 
+func isBlankOrZero(b []byte, i int) bool { + // return isBlank(b, i) || isBreakOrZero(b, i) + return ( + // isBlank: + b[i] == ' ' || b[i] == '\t' || + // isBreakOrZero: + b[i] == '\r' || // CR (#xD) + b[i] == '\n' || // LF (#xA) + b[i] == 0xC2 && b[i+1] == 0x85 || // NEL (#x85) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA8 || // LS (#x2028) + b[i] == 0xE2 && b[i+1] == 0x80 && b[i+2] == 0xA9 || // PS (#x2029) + b[i] == 0) +} + +// Determine the width of the character. +func width(b byte) int { + // Don't replace these by a switch without first + // confirming that it is being inlined. + if b&0x80 == 0x00 { + return 1 + } + if b&0xE0 == 0xC0 { + return 2 + } + if b&0xF0 == 0xE0 { + return 3 + } + if b&0xF8 == 0xF0 { + return 4 + } + return 0 +} diff --git a/vendor/go.yaml.in/yaml/v4/resolve.go b/vendor/go.yaml.in/yaml/v4/resolve.go new file mode 100644 index 00000000..ae855f2f --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/resolve.go @@ -0,0 +1,286 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "encoding/base64" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +type resolveMapItem struct { + value any + tag string +} + +var ( + resolveTable = make([]byte, 256) + resolveMap = make(map[string]resolveMapItem) +) + +func init() { + t := resolveTable + t[int('+')] = 'S' // Sign + t[int('-')] = 'S' + for _, c := range "0123456789" { + t[int(c)] = 'D' // Digit + } + for _, c := range "yYnNtTfFoO~" { + t[int(c)] = 'M' // In map + } + t[int('.')] = '.' // Float (potentially in map) + + resolveMapList := []struct { + v any + tag string + l []string + }{ + {true, boolTag, []string{"true", "True", "TRUE"}}, + {false, boolTag, []string{"false", "False", "FALSE"}}, + {nil, nullTag, []string{"", "~", "null", "Null", "NULL"}}, + {math.NaN(), floatTag, []string{".nan", ".NaN", ".NAN"}}, + {math.Inf(+1), floatTag, []string{".inf", ".Inf", ".INF"}}, + {math.Inf(+1), floatTag, []string{"+.inf", "+.Inf", "+.INF"}}, + {math.Inf(-1), floatTag, []string{"-.inf", "-.Inf", "-.INF"}}, + {negativeZero, floatTag, []string{"-0", "-0.0"}}, + {"<<", mergeTag, []string{"<<"}}, + } + + m := resolveMap + for _, item := range resolveMapList { + for _, s := range item.l { + m[s] = resolveMapItem{item.v, item.tag} + } + } +} + +const ( + nullTag = "!!null" + boolTag = "!!bool" + strTag = "!!str" + intTag = "!!int" + floatTag = "!!float" + timestampTag = "!!timestamp" + seqTag = "!!seq" + mapTag = "!!map" + binaryTag = "!!binary" + mergeTag = "!!merge" +) + +// negativeZero represents -0.0 for YAML encoding/decoding +// this is needed because Go constants cannot express -0.0 +// https://staticcheck.dev/docs/checks/#SA4026 +var negativeZero = math.Copysign(0.0, -1.0) + +var ( + longTags = make(map[string]string) + shortTags = make(map[string]string) +) + +func init() { + for _, stag := range []string{nullTag, boolTag, strTag, intTag, floatTag, timestampTag, seqTag, mapTag, binaryTag, mergeTag} { + ltag := longTag(stag) + longTags[stag] = ltag + 
shortTags[ltag] = stag + } +} + +const longTagPrefix = "tag:yaml.org,2002:" + +func shortTag(tag string) string { + if strings.HasPrefix(tag, longTagPrefix) { + if stag, ok := shortTags[tag]; ok { + return stag + } + return "!!" + tag[len(longTagPrefix):] + } + return tag +} + +func longTag(tag string) string { + if strings.HasPrefix(tag, "!!") { + if ltag, ok := longTags[tag]; ok { + return ltag + } + return longTagPrefix + tag[2:] + } + return tag +} + +func resolvableTag(tag string) bool { + switch tag { + case "", strTag, boolTag, intTag, floatTag, nullTag, timestampTag: + return true + } + return false +} + +var yamlStyleFloat = regexp.MustCompile(`^[-+]?(\.[0-9]+|[0-9]+(\.[0-9]*)?)([eE][-+]?[0-9]+)?$`) + +func resolve(tag string, in string) (rtag string, out any) { + tag = shortTag(tag) + if !resolvableTag(tag) { + return tag, in + } + + defer func() { + switch tag { + case "", rtag, strTag, binaryTag: + return + case floatTag: + if rtag == intTag { + switch v := out.(type) { + case int64: + rtag = floatTag + out = float64(v) + return + case int: + rtag = floatTag + out = float64(v) + return + } + } + } + failf("cannot decode %s `%s` as a %s", shortTag(rtag), in, shortTag(tag)) + }() + + // Any data is accepted as a !!str or !!binary. + // Otherwise, the prefix is enough of a hint about what it might be. + hint := byte('N') + if in != "" { + hint = resolveTable[in[0]] + } + if hint != 0 && tag != strTag && tag != binaryTag { + // Handle things we can lookup in a map. + if item, ok := resolveMap[in]; ok { + return item.tag, item.value + } + + // Base 60 floats are a bad idea, were dropped in YAML 1.2, and + // are purposefully unsupported here. They're still quoted on + // the way out for compatibility with other parser, though. + + switch hint { + case 'M': + // We've already checked the map above. + + case '.': + // Not in the map, so maybe a normal float. 
+ floatv, err := strconv.ParseFloat(in, 64) + if err == nil { + return floatTag, floatv + } + + case 'D', 'S': + // Int, float, or timestamp. + // Only try values as a timestamp if the value is unquoted or there's an explicit + // !!timestamp tag. + if tag == "" || tag == timestampTag { + t, ok := parseTimestamp(in) + if ok { + return timestampTag, t + } + } + + plain := strings.ReplaceAll(in, "_", "") + intv, err := strconv.ParseInt(plain, 0, 64) + if err == nil { + if intv == int64(int(intv)) { + return intTag, int(intv) + } else { + return intTag, intv + } + } + uintv, err := strconv.ParseUint(plain, 0, 64) + if err == nil { + return intTag, uintv + } + if yamlStyleFloat.MatchString(plain) { + floatv, err := strconv.ParseFloat(plain, 64) + if err == nil { + return floatTag, floatv + } + } + default: + panic("internal error: missing handler for resolver table: " + string(rune(hint)) + " (with " + in + ")") + } + } + return strTag, in +} + +// encodeBase64 encodes s as base64 that is broken up into multiple lines +// as appropriate for the resulting length. +func encodeBase64(s string) string { + const lineLen = 70 + encLen := base64.StdEncoding.EncodedLen(len(s)) + lines := encLen/lineLen + 1 + buf := make([]byte, encLen*2+lines) + in := buf[0:encLen] + out := buf[encLen:] + base64.StdEncoding.Encode(in, []byte(s)) + k := 0 + for i := 0; i < len(in); i += lineLen { + j := i + lineLen + if j > len(in) { + j = len(in) + } + k += copy(out[k:], in[i:j]) + if lines > 1 { + out[k] = '\n' + k++ + } + } + return string(out[:k]) +} + +// This is a subset of the formats allowed by the regular expression +// defined at http://yaml.org/type/timestamp.html. +var allowedTimestampFormats = []string{ + "2006-1-2T15:4:5.999999999Z07:00", // RCF3339Nano with short date fields. + "2006-1-2t15:4:5.999999999Z07:00", // RFC3339Nano with short date fields and lower-case "t". 
+ "2006-1-2 15:4:5.999999999", // space separated with no time zone + "2006-1-2", // date only + // Notable exception: time.Parse cannot handle: "2001-12-14 21:59:43.10 -5" + // from the set of examples. +} + +// parseTimestamp parses s as a timestamp string and +// returns the timestamp and reports whether it succeeded. +// Timestamp formats are defined at http://yaml.org/type/timestamp.html +func parseTimestamp(s string) (time.Time, bool) { + // TODO write code to check all the formats supported by + // http://yaml.org/type/timestamp.html instead of using time.Parse. + + // Quick check: all date formats start with YYYY-. + i := 0 + for ; i < len(s); i++ { + if c := s[i]; c < '0' || c > '9' { + break + } + } + if i != 4 || i == len(s) || s[i] != '-' { + return time.Time{}, false + } + for _, format := range allowedTimestampFormats { + if t, err := time.Parse(format, s); err == nil { + return t, true + } + } + return time.Time{}, false +} diff --git a/vendor/go.yaml.in/yaml/v4/sorter.go b/vendor/go.yaml.in/yaml/v4/sorter.go new file mode 100644 index 00000000..7131bbae --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/sorter.go @@ -0,0 +1,134 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package yaml + +import ( + "reflect" + "unicode" +) + +type keyList []reflect.Value + +func (l keyList) Len() int { return len(l) } +func (l keyList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l keyList) Less(i, j int) bool { + a := l[i] + b := l[j] + ak := a.Kind() + bk := b.Kind() + for (ak == reflect.Interface || ak == reflect.Pointer) && !a.IsNil() { + a = a.Elem() + ak = a.Kind() + } + for (bk == reflect.Interface || bk == reflect.Pointer) && !b.IsNil() { + b = b.Elem() + bk = b.Kind() + } + af, aok := keyFloat(a) + bf, bok := keyFloat(b) + if aok && bok { + if af != bf { + return af < bf + } + if ak != bk { + return ak < bk + } + return numLess(a, b) + } + if ak != reflect.String || bk != reflect.String { + return ak < bk + } + ar, br := []rune(a.String()), []rune(b.String()) + digits := false + for i := 0; i < len(ar) && i < len(br); i++ { + if ar[i] == br[i] { + digits = unicode.IsDigit(ar[i]) + continue + } + al := unicode.IsLetter(ar[i]) + bl := unicode.IsLetter(br[i]) + if al && bl { + return ar[i] < br[i] + } + if al || bl { + if digits { + return al + } else { + return bl + } + } + var ai, bi int + var an, bn int64 + if ar[i] == '0' || br[i] == '0' { + for j := i - 1; j >= 0 && unicode.IsDigit(ar[j]); j-- { + if ar[j] != '0' { + an = 1 + bn = 1 + break + } + } + } + for ai = i; ai < len(ar) && unicode.IsDigit(ar[ai]); ai++ { + an = an*10 + int64(ar[ai]-'0') + } + for bi = i; bi < len(br) && unicode.IsDigit(br[bi]); bi++ { + bn = bn*10 + int64(br[bi]-'0') + } + if an != bn { + return an < bn + } + if ai != bi { + return ai < bi + } + return ar[i] < br[i] + } + return len(ar) < len(br) +} + +// keyFloat returns a float value for v if it is a number/bool +// and whether it is a number/bool or not. 
+func keyFloat(v reflect.Value) (f float64, ok bool) { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return float64(v.Int()), true + case reflect.Float32, reflect.Float64: + return v.Float(), true + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return float64(v.Uint()), true + case reflect.Bool: + if v.Bool() { + return 1, true + } + return 0, true + } + return 0, false +} + +// numLess returns whether a < b. +// a and b must necessarily have the same kind. +func numLess(a, b reflect.Value) bool { + switch a.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return a.Int() < b.Int() + case reflect.Float32, reflect.Float64: + return a.Float() < b.Float() + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return a.Uint() < b.Uint() + case reflect.Bool: + return !a.Bool() && b.Bool() + } + panic("not a number") +} diff --git a/vendor/go.yaml.in/yaml/v4/yaml.go b/vendor/go.yaml.in/yaml/v4/yaml.go new file mode 100644 index 00000000..a218035c --- /dev/null +++ b/vendor/go.yaml.in/yaml/v4/yaml.go @@ -0,0 +1,904 @@ +// +// Copyright (c) 2011-2019 Canonical Ltd +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package yaml implements YAML support for the Go language. 
+// +// Source code and other details for the project are available at GitHub: +// +// https://github.com/yaml/go-yaml +package yaml + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "strings" + "sync" + "unicode" + "unicode/utf8" + + "go.yaml.in/yaml/v4/internal/libyaml" +) + +// The Unmarshaler interface may be implemented by types to customize their +// behavior when being unmarshaled from a YAML document. +type Unmarshaler interface { + UnmarshalYAML(value *Node) error +} + +type obsoleteUnmarshaler interface { + UnmarshalYAML(unmarshal func(any) error) error +} + +// The Marshaler interface may be implemented by types to customize their +// behavior when being marshaled into a YAML document. The returned value +// is marshaled in place of the original value implementing Marshaler. +// +// If an error is returned by MarshalYAML, the marshaling procedure stops +// and returns with the provided error. +type Marshaler interface { + MarshalYAML() (any, error) +} + +// Unmarshal decodes the first document found within the in byte slice +// and assigns decoded values into the out value. +// +// Maps and pointers (to a struct, string, int, etc) are accepted as out +// values. If an internal pointer within a struct is not initialized, +// the yaml package will initialize it if necessary for unmarshalling +// the provided data. The out parameter must not be nil. +// +// The type of the decoded values should be compatible with the respective +// values in out. If one or more values cannot be decoded due to a type +// mismatches, decoding continues partially until the end of the YAML +// content, and a *yaml.TypeError is returned with details for all +// missed values. +// +// Struct fields are only unmarshalled if they are exported (have an +// upper case first letter), and are unmarshalled using the field name +// lowercased as the default key. 
Custom keys may be defined via the +// "yaml" name in the field tag: the content preceding the first comma +// is used as the key, and the following comma-separated options are +// used to tweak the marshaling process (see Marshal). +// Conflicting names result in a runtime error. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// var t T +// yaml.Unmarshal([]byte("a: 1\nb: 2"), &t) +// +// See the documentation of Marshal for the format of tags and a list of +// supported tag options. +func Unmarshal(in []byte, out any) (err error) { + return unmarshal(in, out, false) +} + +// A Decoder reads and decodes YAML values from an input stream. +type Decoder struct { + parser *parser + knownFields bool +} + +// NewDecoder returns a new decoder that reads from r. +// +// The decoder introduces its own buffering and may read +// data from r beyond the YAML values requested. +func NewDecoder(r io.Reader) *Decoder { + return &Decoder{ + parser: newParserFromReader(r), + } +} + +// KnownFields ensures that the keys in decoded mappings to +// exist as fields in the struct being decoded into. +func (dec *Decoder) KnownFields(enable bool) { + dec.knownFields = enable +} + +// Decode reads the next YAML-encoded value from its input +// and stores it in the value pointed to by v. +// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (dec *Decoder) Decode(v any) (err error) { + d := newDecoder() + d.knownFields = dec.knownFields + defer handleErr(&err) + node := dec.parser.parse() + if node == nil { + return io.EOF + } + out := reflect.ValueOf(v) + if out.Kind() == reflect.Pointer && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(node, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Decode decodes the node and stores its data into the value pointed to by v. 
+// +// See the documentation for Unmarshal for details about the +// conversion of YAML into a Go value. +func (n *Node) Decode(v any) (err error) { + d := newDecoder() + defer handleErr(&err) + out := reflect.ValueOf(v) + if out.Kind() == reflect.Pointer && !out.IsNil() { + out = out.Elem() + } + d.unmarshal(n, out) + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +func unmarshal(in []byte, out any, strict bool) (err error) { + defer handleErr(&err) + d := newDecoder() + p := newParser(in) + defer p.destroy() + node := p.parse() + if node != nil { + v := reflect.ValueOf(out) + if v.Kind() == reflect.Pointer && !v.IsNil() { + v = v.Elem() + } + d.unmarshal(node, v) + } + if len(d.terrors) > 0 { + return &TypeError{d.terrors} + } + return nil +} + +// Marshal serializes the value provided into a YAML document. The structure +// of the generated document will reflect the structure of the value itself. +// Maps and pointers (to struct, string, int, etc) are accepted as the in value. +// +// Struct fields are only marshaled if they are exported (have an upper case +// first letter), and are marshaled using the field name lowercased as the +// default key. Custom keys may be defined via the "yaml" name in the field +// tag: the content preceding the first comma is used as the key, and the +// following comma-separated options are used to tweak the marshaling process. +// Conflicting names result in a runtime error. +// +// The field tag format accepted is: +// +// `(...) yaml:"[][,[,]]" (...)` +// +// The following flags are currently supported: +// +// omitempty Only include the field if it's not set to the zero +// value for the type or to empty slices or maps. +// Zero valued structs will be omitted if all their public +// fields are zero, unless they implement an IsZero +// method (see the IsZeroer interface type), in which +// case the field will be excluded if IsZero returns true. 
+// +// flow Marshal using a flow style (useful for structs, +// sequences and maps). +// +// inline Inline the field, which must be a struct or a map, +// causing all of its fields or keys to be processed as if +// they were part of the outer struct. For maps, keys must +// not conflict with the yaml keys of other struct fields. +// +// In addition, if the key is "-", the field is ignored. +// +// For example: +// +// type T struct { +// F int `yaml:"a,omitempty"` +// B int +// } +// yaml.Marshal(&T{B: 2}) // Returns "b: 2\n" +// yaml.Marshal(&T{F: 1}} // Returns "a: 1\nb: 0\n" +func Marshal(in any) (out []byte, err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(in)) + e.finish() + out = e.out + return out, err +} + +// An Encoder writes YAML values to an output stream. +type Encoder struct { + encoder *encoder +} + +// NewEncoder returns a new encoder that writes to w. +// The Encoder should be closed after use to flush all data +// to w. +func NewEncoder(w io.Writer) *Encoder { + return &Encoder{ + encoder: newEncoderWithWriter(w), + } +} + +// Encode writes the YAML encoding of v to the stream. +// If multiple items are encoded to the stream, the +// second and subsequent document will be preceded +// with a "---" document separator, but the first will not. +// +// See the documentation for Marshal for details about the conversion of Go +// values to YAML. +func (e *Encoder) Encode(v any) (err error) { + defer handleErr(&err) + e.encoder.marshalDoc("", reflect.ValueOf(v)) + return nil +} + +// Encode encodes value v and stores its representation in n. +// +// See the documentation for Marshal for details about the +// conversion of Go values into YAML. 
+func (n *Node) Encode(v any) (err error) { + defer handleErr(&err) + e := newEncoder() + defer e.destroy() + e.marshalDoc("", reflect.ValueOf(v)) + e.finish() + p := newParser(e.out) + p.textless = true + defer p.destroy() + doc := p.parse() + *n = *doc.Content[0] + return nil +} + +// SetIndent changes the used indentation used when encoding. +func (e *Encoder) SetIndent(spaces int) { + if spaces < 0 { + panic("yaml: cannot indent to a negative number of spaces") + } + e.encoder.indent = spaces +} + +// CompactSeqIndent makes it so that '- ' is considered part of the indentation. +func (e *Encoder) CompactSeqIndent() { + e.encoder.emitter.CompactSequenceIndent = true +} + +// DefaultSeqIndent makes it so that '- ' is not considered part of the indentation. +func (e *Encoder) DefaultSeqIndent() { + e.encoder.emitter.CompactSequenceIndent = false +} + +// Close closes the encoder by writing any remaining data. +// It does not write a stream terminating string "...". +func (e *Encoder) Close() (err error) { + defer handleErr(&err) + e.encoder.finish() + return nil +} + +func handleErr(err *error) { + if v := recover(); v != nil { + if e, ok := v.(*yamlError); ok { + *err = e.err + } else { + panic(v) + } + } +} + +type yamlError struct { + err error +} + +func fail(err error) { + panic(&yamlError{err}) +} + +func failf(format string, args ...any) { + panic(&yamlError{fmt.Errorf("yaml: "+format, args...)}) +} + +// ParserError represents a fatal error encountered during the parsing phase. +// These errors typically indicate a syntax issue in the YAML document that +// prevents further processing. 
+type ParserError struct { + Message string + Line int + Column int +} + +func (e *ParserError) Error() string { + var b strings.Builder + b.WriteString("yaml: ") + if e.Line != 0 { + b.WriteString("line " + strconv.Itoa(e.Line) + ": ") + } + b.WriteString(e.Message) + return b.String() +} + +// UnmarshalError represents a single, non-fatal error that occurred during +// the unmarshaling of a YAML document into a Go value. +type UnmarshalError struct { + Err error + Line int + Column int +} + +func (e *UnmarshalError) Error() string { + return fmt.Sprintf("line %d: %s", e.Line, e.Err.Error()) +} + +func (e *UnmarshalError) Unwrap() error { + return e.Err +} + +// A TypeError is returned by Unmarshal when one or more fields in +// the YAML document cannot be properly decoded into the requested +// types. When this error is returned, the value is still +// unmarshaled partially. +type TypeError struct { + Errors []*UnmarshalError +} + +func (e *TypeError) Error() string { + var b strings.Builder + b.WriteString("yaml: unmarshal errors:") + for _, err := range e.Errors { + b.WriteString("\n " + err.Error()) + } + return b.String() +} + +// Is checks if the error is equal to any of the errors in the TypeError. +// +// [errors.Is] will call this method when unwrapping errors. +func (e *TypeError) Is(target error) bool { + for _, err := range e.Errors { + if errors.Is(err, target) { + return true + } + + // Check if the error is not wrapped in the UnmarshalError. + if err != nil && errors.Is(err.Err, target) { + return true + } + } + return false +} + +// As checks if the error is equal to any of the errors in the TypeError. +// +// [errors.As] will call this method when unwrapping errors. 
+func (e *TypeError) As(target any) bool { + for _, err := range e.Errors { + if errors.As(err, target) { + return true + } + } + return false +} + +type Kind uint32 + +const ( + DocumentNode Kind = 1 << iota + SequenceNode + MappingNode + ScalarNode + AliasNode +) + +type Style uint32 + +const ( + TaggedStyle Style = 1 << iota + DoubleQuotedStyle + SingleQuotedStyle + LiteralStyle + FoldedStyle + FlowStyle +) + +// Node represents an element in the YAML document hierarchy. While documents +// are typically encoded and decoded into higher level types, such as structs +// and maps, Node is an intermediate representation that allows detailed +// control over the content being decoded or encoded. +// +// It's worth noting that although Node offers access into details such as +// line numbers, columns, and comments, the content when re-encoded will not +// have its original textual representation preserved. An effort is made to +// render the data pleasantly, and to preserve comments near the data they +// describe, though. +// +// Values that make use of the Node type interact with the yaml package in the +// same way any other type would do, by encoding and decoding yaml data +// directly or indirectly into them. +// +// For example: +// +// var person struct { +// Name string +// Address yaml.Node +// } +// err := yaml.Unmarshal(data, &person) +// +// Or by itself: +// +// var person Node +// err := yaml.Unmarshal(data, &person) +type Node struct { + // Kind defines whether the node is a document, a mapping, a sequence, + // a scalar value, or an alias to another node. The specific data type of + // scalar nodes may be obtained via the ShortTag and LongTag methods. + Kind Kind + + // Style allows customizing the appearance of the node in the tree. + Style Style + + // Tag holds the YAML tag defining the data type for the value. + // When decoding, this field will always be set to the resolved tag, + // even when it wasn't explicitly provided in the YAML content. 
+ // When encoding, if this field is unset the value type will be + // implied from the node properties, and if it is set, it will only + // be serialized into the representation if TaggedStyle is used or + // the implicit tag diverges from the provided one. + Tag string + + // Value holds the unescaped and unquoted representation of the value. + Value string + + // Anchor holds the anchor name for this node, which allows aliases to point to it. + Anchor string + + // Alias holds the node that this alias points to. Only valid when Kind is AliasNode. + Alias *Node + + // Content holds contained nodes for documents, mappings, and sequences. + Content []*Node + + // HeadComment holds any comments in the lines preceding the node and + // not separated by an empty line. + HeadComment string + + // LineComment holds any comments at the end of the line where the node is in. + LineComment string + + // FootComment holds any comments following the node and before empty lines. + FootComment string + + // Line and Column hold the node position in the decoded YAML text. + // These fields are not respected when encoding the node. + Line int + Column int +} + +// IsZero returns whether the node has all of its fields unset. +func (n *Node) IsZero() bool { + return n.Kind == 0 && n.Style == 0 && n.Tag == "" && n.Value == "" && n.Anchor == "" && n.Alias == nil && n.Content == nil && + n.HeadComment == "" && n.LineComment == "" && n.FootComment == "" && n.Line == 0 && n.Column == 0 +} + +// LongTag returns the long form of the tag that indicates the data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. +func (n *Node) LongTag() string { + return longTag(n.ShortTag()) +} + +// ShortTag returns the short form of the YAML tag that indicates data type for +// the node. If the Tag field isn't explicitly defined, one will be computed +// based on the node properties. 
+func (n *Node) ShortTag() string { + if n.indicatedString() { + return strTag + } + if n.Tag == "" || n.Tag == "!" { + switch n.Kind { + case MappingNode: + return mapTag + case SequenceNode: + return seqTag + case AliasNode: + if n.Alias != nil { + return n.Alias.ShortTag() + } + case ScalarNode: + tag, _ := resolve("", n.Value) + return tag + case 0: + // Special case to make the zero value convenient. + if n.IsZero() { + return nullTag + } + } + return "" + } + return shortTag(n.Tag) +} + +func (n *Node) indicatedString() bool { + return n.Kind == ScalarNode && + (shortTag(n.Tag) == strTag || + (n.Tag == "" || n.Tag == "!") && n.Style&(SingleQuotedStyle|DoubleQuotedStyle|LiteralStyle|FoldedStyle) != 0) +} + +// shouldUseLiteralStyle determines if a string should use literal style. +// It returns true if the string contains newlines AND meets additional criteria: +// - is at least 2 characters long +// - contains at least one non-whitespace character +func shouldUseLiteralStyle(s string) bool { + if !strings.Contains(s, "\n") || len(s) < 2 { + return false + } + // Must contain at least one non-whitespace character + for _, r := range s { + if !unicode.IsSpace(r) { + return true + } + } + return false +} + +// SetString is a convenience function that sets the node to a string value +// and defines its style in a pleasant way depending on its content. +func (n *Node) SetString(s string) { + n.Kind = ScalarNode + if utf8.ValidString(s) { + n.Value = s + n.Tag = strTag + } else { + n.Value = encodeBase64(s) + n.Tag = binaryTag + } + if shouldUseLiteralStyle(n.Value) { + n.Style = LiteralStyle + } +} + +// -------------------------------------------------------------------------- +// Maintain a mapping of keys to structure field indexes + +// The code in this section was copied from mgo/bson. + +// structInfo holds details for the serialization of fields of +// a given struct. 
+type structInfo struct { + FieldsMap map[string]fieldInfo + FieldsList []fieldInfo + + // InlineMap is the number of the field in the struct that + // contains an ,inline map, or -1 if there's none. + InlineMap int + + // InlineUnmarshalers holds indexes to inlined fields that + // contain unmarshaler values. + InlineUnmarshalers [][]int +} + +type fieldInfo struct { + Key string + Num int + OmitEmpty bool + Flow bool + // Id holds the unique field identifier, so we can cheaply + // check for field duplicates without maintaining an extra map. + Id int + + // Inline holds the field index if the field is part of an inlined struct. + Inline []int +} + +var ( + structMap = make(map[reflect.Type]*structInfo) + fieldMapMutex sync.RWMutex + unmarshalerType reflect.Type +) + +func init() { + var v Unmarshaler + unmarshalerType = reflect.ValueOf(&v).Elem().Type() +} + +func getStructInfo(st reflect.Type) (*structInfo, error) { + fieldMapMutex.RLock() + sinfo, found := structMap[st] + fieldMapMutex.RUnlock() + if found { + return sinfo, nil + } + + n := st.NumField() + fieldsMap := make(map[string]fieldInfo) + fieldsList := make([]fieldInfo, 0, n) + inlineMap := -1 + inlineUnmarshalers := [][]int(nil) + for i := 0; i != n; i++ { + field := st.Field(i) + if field.PkgPath != "" && !field.Anonymous { + continue // Private field + } + + info := fieldInfo{Num: i} + + tag := field.Tag.Get("yaml") + if tag == "" && !strings.Contains(string(field.Tag), ":") { + tag = string(field.Tag) + } + if tag == "-" { + continue + } + + inline := false + fields := strings.Split(tag, ",") + if len(fields) > 1 { + for _, flag := range fields[1:] { + switch flag { + case "omitempty": + info.OmitEmpty = true + case "flow": + info.Flow = true + case "inline": + inline = true + default: + return nil, fmt.Errorf("unsupported flag %q in tag %q of type %s", flag, tag, st) + } + } + tag = fields[0] + } + + if inline { + switch field.Type.Kind() { + case reflect.Map: + if inlineMap >= 0 { + return nil, 
errors.New("multiple ,inline maps in struct " + st.String()) + } + if field.Type.Key() != reflect.TypeOf("") { + return nil, errors.New("option ,inline needs a map with string keys in struct " + st.String()) + } + inlineMap = info.Num + case reflect.Struct, reflect.Pointer: + ftype := field.Type + for ftype.Kind() == reflect.Pointer { + ftype = ftype.Elem() + } + if ftype.Kind() != reflect.Struct { + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + if reflect.PointerTo(ftype).Implements(unmarshalerType) { + inlineUnmarshalers = append(inlineUnmarshalers, []int{i}) + } else { + sinfo, err := getStructInfo(ftype) + if err != nil { + return nil, err + } + for _, index := range sinfo.InlineUnmarshalers { + inlineUnmarshalers = append(inlineUnmarshalers, append([]int{i}, index...)) + } + for _, finfo := range sinfo.FieldsList { + if _, found := fieldsMap[finfo.Key]; found { + msg := "duplicated key '" + finfo.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + if finfo.Inline == nil { + finfo.Inline = []int{i, finfo.Num} + } else { + finfo.Inline = append([]int{i}, finfo.Inline...) 
+ } + finfo.Id = len(fieldsList) + fieldsMap[finfo.Key] = finfo + fieldsList = append(fieldsList, finfo) + } + } + default: + return nil, errors.New("option ,inline may only be used on a struct or map field") + } + continue + } + + if tag != "" { + info.Key = tag + } else { + info.Key = strings.ToLower(field.Name) + } + + if _, found = fieldsMap[info.Key]; found { + msg := "duplicated key '" + info.Key + "' in struct " + st.String() + return nil, errors.New(msg) + } + + info.Id = len(fieldsList) + fieldsList = append(fieldsList, info) + fieldsMap[info.Key] = info + } + + sinfo = &structInfo{ + FieldsMap: fieldsMap, + FieldsList: fieldsList, + InlineMap: inlineMap, + InlineUnmarshalers: inlineUnmarshalers, + } + + fieldMapMutex.Lock() + structMap[st] = sinfo + fieldMapMutex.Unlock() + return sinfo, nil +} + +// IsZeroer is used to check whether an object is zero to +// determine whether it should be omitted when marshaling +// with the omitempty flag. One notable implementation +// is time.Time. 
+type IsZeroer interface { + IsZero() bool +} + +func isZero(v reflect.Value) bool { + kind := v.Kind() + if z, ok := v.Interface().(IsZeroer); ok { + if (kind == reflect.Pointer || kind == reflect.Interface) && v.IsNil() { + return true + } + return z.IsZero() + } + switch kind { + case reflect.String: + return len(v.String()) == 0 + case reflect.Interface, reflect.Pointer: + return v.IsNil() + case reflect.Slice: + return v.Len() == 0 + case reflect.Map: + return v.Len() == 0 + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return v.Uint() == 0 + case reflect.Bool: + return !v.Bool() + case reflect.Struct: + vt := v.Type() + for i := v.NumField() - 1; i >= 0; i-- { + if vt.Field(i).PkgPath != "" { + continue // Private field + } + if !isZero(v.Field(i)) { + return false + } + } + return true + } + return false +} + +// ParserGetEvents parses the YAML input and returns the generated event stream. 
+func ParserGetEvents(in []byte) (string, error) { + p := newParser(in) + defer p.destroy() + var events strings.Builder + var event libyaml.Event + for { + if !p.parser.Parse(&event) { + return "", errors.New(p.parser.Problem) + } + formatted := formatEvent(&event) + events.WriteString(formatted) + if event.Type == libyaml.STREAM_END_EVENT { + event.Delete() + break + } + event.Delete() + events.WriteByte('\n') + } + return events.String(), nil +} + +func formatEvent(e *libyaml.Event) string { + var b strings.Builder + switch e.Type { + case libyaml.STREAM_START_EVENT: + b.WriteString("+STR") + case libyaml.STREAM_END_EVENT: + b.WriteString("-STR") + case libyaml.DOCUMENT_START_EVENT: + b.WriteString("+DOC") + if !e.Implicit { + b.WriteString(" ---") + } + case libyaml.DOCUMENT_END_EVENT: + b.WriteString("-DOC") + if !e.Implicit { + b.WriteString(" ...") + } + case libyaml.ALIAS_EVENT: + b.WriteString("=ALI *") + b.Write(e.Anchor) + case libyaml.SCALAR_EVENT: + b.WriteString("=VAL") + if len(e.Anchor) > 0 { + b.WriteString(" &") + b.Write(e.Anchor) + } + if len(e.Tag) > 0 { + b.WriteString(" <") + b.Write(e.Tag) + b.WriteString(">") + } + switch e.ScalarStyle() { + case libyaml.PLAIN_SCALAR_STYLE: + b.WriteString(" :") + case libyaml.LITERAL_SCALAR_STYLE: + b.WriteString(" |") + case libyaml.FOLDED_SCALAR_STYLE: + b.WriteString(" >") + case libyaml.SINGLE_QUOTED_SCALAR_STYLE: + b.WriteString(" '") + case libyaml.DOUBLE_QUOTED_SCALAR_STYLE: + b.WriteString(` "`) + } + // Escape special characters for consistent event output. 
+ val := strings.NewReplacer( + `\`, `\\`, + "\n", `\n`, + "\t", `\t`, + ).Replace(string(e.Value)) + b.WriteString(val) + + case libyaml.SEQUENCE_START_EVENT: + b.WriteString("+SEQ") + if len(e.Anchor) > 0 { + b.WriteString(" &") + b.Write(e.Anchor) + } + if len(e.Tag) > 0 { + b.WriteString(" <") + b.Write(e.Tag) + b.WriteString(">") + } + if e.SequenceStyle() == libyaml.FLOW_SEQUENCE_STYLE { + b.WriteString(" []") + } + case libyaml.SEQUENCE_END_EVENT: + b.WriteString("-SEQ") + case libyaml.MAPPING_START_EVENT: + b.WriteString("+MAP") + if len(e.Anchor) > 0 { + b.WriteString(" &") + b.Write(e.Anchor) + } + if len(e.Tag) > 0 { + b.WriteString(" <") + b.Write(e.Tag) + b.WriteString(">") + } + if e.MappingStyle() == libyaml.FLOW_MAPPING_STYLE { + b.WriteString(" {}") + } + case libyaml.MAPPING_END_EVENT: + b.WriteString("-MAP") + } + return b.String() +} diff --git a/vendor/golang.org/x/sync/LICENSE b/vendor/golang.org/x/sync/LICENSE new file mode 100644 index 00000000..2a7cf70d --- /dev/null +++ b/vendor/golang.org/x/sync/LICENSE @@ -0,0 +1,27 @@ +Copyright 2009 The Go Authors. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google LLC nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. 
+ +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/sync/PATENTS b/vendor/golang.org/x/sync/PATENTS new file mode 100644 index 00000000..73309904 --- /dev/null +++ b/vendor/golang.org/x/sync/PATENTS @@ -0,0 +1,22 @@ +Additional IP Rights Grant (Patents) + +"This implementation" means the copyrightable works distributed by +Google as part of the Go project. + +Google hereby grants to You a perpetual, worldwide, non-exclusive, +no-charge, royalty-free, irrevocable (except as stated in this section) +patent license to make, have made, use, offer to sell, sell, import, +transfer and otherwise run, modify and propagate the contents of this +implementation of Go, where such license applies only to those patent +claims, both currently owned or controlled by Google and acquired in +the future, licensable by Google that are necessarily infringed by this +implementation of Go. This grant does not include claims that would be +infringed only as a consequence of further modification of this +implementation. 
If you or your agent or exclusive licensee institute or +order or agree to the institution of patent litigation against any +entity (including a cross-claim or counterclaim in a lawsuit) alleging +that this implementation of Go or any code incorporated within this +implementation of Go constitutes direct or contributory patent +infringement, or inducement of patent infringement, then any patent +rights granted to you under this License for this implementation of Go +shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go new file mode 100644 index 00000000..f69fd754 --- /dev/null +++ b/vendor/golang.org/x/sync/errgroup/errgroup.go @@ -0,0 +1,151 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package errgroup provides synchronization, error propagation, and Context +// cancellation for groups of goroutines working on subtasks of a common task. +// +// [errgroup.Group] is related to [sync.WaitGroup] but adds handling of tasks +// returning errors. +package errgroup + +import ( + "context" + "fmt" + "sync" +) + +type token struct{} + +// A Group is a collection of goroutines working on subtasks that are part of +// the same overall task. A Group should not be reused for different tasks. +// +// A zero Group is valid, has no limit on the number of active goroutines, +// and does not cancel on error. +type Group struct { + cancel func(error) + + wg sync.WaitGroup + + sem chan token + + errOnce sync.Once + err error +} + +func (g *Group) done() { + if g.sem != nil { + <-g.sem + } + g.wg.Done() +} + +// WithContext returns a new Group and an associated Context derived from ctx. +// +// The derived Context is canceled the first time a function passed to Go +// returns a non-nil error or the first time Wait returns, whichever occurs +// first. 
+func WithContext(ctx context.Context) (*Group, context.Context) { + ctx, cancel := context.WithCancelCause(ctx) + return &Group{cancel: cancel}, ctx +} + +// Wait blocks until all function calls from the Go method have returned, then +// returns the first non-nil error (if any) from them. +func (g *Group) Wait() error { + g.wg.Wait() + if g.cancel != nil { + g.cancel(g.err) + } + return g.err +} + +// Go calls the given function in a new goroutine. +// +// The first call to Go must happen before a Wait. +// It blocks until the new goroutine can be added without the number of +// goroutines in the group exceeding the configured limit. +// +// The first goroutine in the group that returns a non-nil error will +// cancel the associated Context, if any. The error will be returned +// by Wait. +func (g *Group) Go(f func() error) { + if g.sem != nil { + g.sem <- token{} + } + + g.wg.Add(1) + go func() { + defer g.done() + + // It is tempting to propagate panics from f() + // up to the goroutine that calls Wait, but + // it creates more problems than it solves: + // - it delays panics arbitrarily, + // making bugs harder to detect; + // - it turns f's panic stack into a mere value, + // hiding it from crash-monitoring tools; + // - it risks deadlocks that hide the panic entirely, + // if f's panic leaves the program in a state + // that prevents the Wait call from being reached. + // See #53757, #74275, #74304, #74306. + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() +} + +// TryGo calls the given function in a new goroutine only if the number of +// active goroutines in the group is currently below the configured limit. +// +// The return value reports whether the goroutine was started. +func (g *Group) TryGo(f func() error) bool { + if g.sem != nil { + select { + case g.sem <- token{}: + // Note: this allows barging iff channels in general allow barging. 
+ default: + return false + } + } + + g.wg.Add(1) + go func() { + defer g.done() + + if err := f(); err != nil { + g.errOnce.Do(func() { + g.err = err + if g.cancel != nil { + g.cancel(g.err) + } + }) + } + }() + return true +} + +// SetLimit limits the number of active goroutines in this group to at most n. +// A negative value indicates no limit. +// A limit of zero will prevent any new goroutines from being added. +// +// Any subsequent call to the Go method will block until it can add an active +// goroutine without exceeding the configured limit. +// +// The limit must not be modified while any goroutines in the group are active. +func (g *Group) SetLimit(n int) { + if n < 0 { + g.sem = nil + return + } + if active := len(g.sem); active != 0 { + panic(fmt.Errorf("errgroup: modify limit while %v goroutines in the group are still active", active)) + } + g.sem = make(chan token, n) +} diff --git a/vendor/golang.org/x/text/feature/plural/common.go b/vendor/golang.org/x/text/feature/plural/common.go new file mode 100644 index 00000000..fdcb373f --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/common.go @@ -0,0 +1,70 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package plural + +// Form defines a plural form. +// +// Not all languages support all forms. Also, the meaning of each form varies +// per language. It is important to note that the name of a form does not +// necessarily correspond one-to-one with the set of numbers. For instance, +// for Croation, One matches not only 1, but also 11, 21, etc. +// +// Each language must at least support the form "other". 
+type Form byte + +const ( + Other Form = iota + Zero + One + Two + Few + Many +) + +var countMap = map[string]Form{ + "other": Other, + "zero": Zero, + "one": One, + "two": Two, + "few": Few, + "many": Many, +} + +type pluralCheck struct { + // category: + // 3..7: opID + // 0..2: category + cat byte + setID byte +} + +// opID identifies the type of operand in the plural rule, being i, n or f. +// (v, w, and t are treated as filters in our implementation.) +type opID byte + +const ( + opMod opID = 0x1 // is '%' used? + opNotEqual opID = 0x2 // using "!=" to compare + opI opID = 0 << 2 // integers after taking the absolute value + opN opID = 1 << 2 // full number (must be integer) + opF opID = 2 << 2 // fraction + opV opID = 3 << 2 // number of visible digits + opW opID = 4 << 2 // number of visible digits without trailing zeros + opBretonM opID = 5 << 2 // hard-wired rule for Breton + opItalian800 opID = 6 << 2 // hard-wired rule for Italian + opAzerbaijan00s opID = 7 << 2 // hard-wired rule for Azerbaijan +) +const ( + // Use this plural form to indicate the next rule needs to match as well. + // The last condition in the list will have the correct plural form. + andNext = 0x7 + formMask = 0x7 + + opShift = 3 + + // numN indicates the maximum integer, or maximum mod value, for which we + // have inclusion masks. + numN = 100 + // The common denominator of the modulo that is taken. + maxMod = 100 +) diff --git a/vendor/golang.org/x/text/feature/plural/message.go b/vendor/golang.org/x/text/feature/plural/message.go new file mode 100644 index 00000000..56d518cc --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/message.go @@ -0,0 +1,244 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package plural + +import ( + "fmt" + "io" + "reflect" + "strconv" + + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// TODO: consider deleting this interface. Maybe VisibleDigits is always +// sufficient and practical. + +// Interface is used for types that can determine their own plural form. +type Interface interface { + // PluralForm reports the plural form for the given language of the + // underlying value. It also returns the integer value. If the integer value + // is larger than fits in n, PluralForm may return a value modulo + // 10,000,000. + PluralForm(t language.Tag, scale int) (f Form, n int) +} + +// Selectf returns the first case for which its selector is a match for the +// arg-th substitution argument to a formatting call, formatting it as indicated +// by format. +// +// The cases argument are pairs of selectors and messages. Selectors are of type +// string or Form. Messages are of type string or catalog.Message. A selector +// matches an argument if: +// - it is "other" or Other +// - it matches the plural form of the argument: "zero", "one", "two", "few", +// or "many", or the equivalent Form +// - it is of the form "=x" where x is an integer that matches the value of +// the argument. 
+// - it is of the form " kindDefault { + e.EncodeUint(uint64(m.scale)) + } + + forms := validForms(cardinal, e.Language()) + + for i := 0; i < len(m.cases); { + if err := compileSelector(e, forms, m.cases[i]); err != nil { + return err + } + if i++; i >= len(m.cases) { + return fmt.Errorf("plural: no message defined for selector %v", m.cases[i-1]) + } + var msg catalog.Message + switch x := m.cases[i].(type) { + case string: + msg = catalog.String(x) + case catalog.Message: + msg = x + default: + return fmt.Errorf("plural: message of type %T; must be string or catalog.Message", x) + } + if err := e.EncodeMessage(msg); err != nil { + return err + } + i++ + } + return nil +} + +func compileSelector(e *catmsg.Encoder, valid []Form, selector interface{}) error { + form := Other + switch x := selector.(type) { + case string: + if x == "" { + return fmt.Errorf("plural: empty selector") + } + if c := x[0]; c == '=' || c == '<' { + val, err := strconv.ParseUint(x[1:], 10, 16) + if err != nil { + return fmt.Errorf("plural: invalid number in selector %q: %v", selector, err) + } + e.EncodeUint(uint64(c)) + e.EncodeUint(val) + return nil + } + var ok bool + form, ok = countMap[x] + if !ok { + return fmt.Errorf("plural: invalid plural form %q", selector) + } + case Form: + form = x + default: + return fmt.Errorf("plural: selector of type %T; want string or Form", selector) + } + + ok := false + for _, f := range valid { + if f == form { + ok = true + break + } + } + if !ok { + return fmt.Errorf("plural: form %q not supported for language %q", selector, e.Language()) + } + e.EncodeUint(uint64(form)) + return nil +} + +func execute(d *catmsg.Decoder) bool { + lang := d.Language() + argN := int(d.DecodeUint()) + kind := int(d.DecodeUint()) + scale := -1 // default + if kind > kindDefault { + scale = int(d.DecodeUint()) + } + form := Other + n := -1 + if arg := d.Arg(argN); arg == nil { + // Default to Other. 
+ } else if x, ok := arg.(number.VisibleDigits); ok { + d := x.Digits(nil, lang, scale) + form, n = cardinal.matchDisplayDigits(lang, &d) + } else if x, ok := arg.(Interface); ok { + // This covers lists and formatters from the number package. + form, n = x.PluralForm(lang, scale) + } else { + var f number.Formatter + switch kind { + case kindScale: + f.InitDecimal(lang) + f.SetScale(scale) + case kindScientific: + f.InitScientific(lang) + f.SetScale(scale) + case kindPrecision: + f.InitDecimal(lang) + f.SetPrecision(scale) + case kindDefault: + // sensible default + f.InitDecimal(lang) + if k := reflect.TypeOf(arg).Kind(); reflect.Int <= k && k <= reflect.Uintptr { + f.SetScale(0) + } else { + f.SetScale(2) + } + } + var dec number.Decimal // TODO: buffer in Printer + dec.Convert(f.RoundingContext, arg) + v := number.FormatDigits(&dec, f.RoundingContext) + if !v.NaN && !v.Inf { + form, n = cardinal.matchDisplayDigits(d.Language(), &v) + } + } + for !d.Done() { + f := d.DecodeUint() + if (f == '=' && n == int(d.DecodeUint())) || + (f == '<' && 0 <= n && n < int(d.DecodeUint())) || + form == Form(f) || + Other == Form(f) { + return d.ExecuteMessage() + } + d.SkipMessage() + } + return false +} diff --git a/vendor/golang.org/x/text/feature/plural/plural.go b/vendor/golang.org/x/text/feature/plural/plural.go new file mode 100644 index 00000000..e9f2d42e --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/plural.go @@ -0,0 +1,262 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_common.go + +// Package plural provides utilities for handling linguistic plurals in text. +// +// The definitions in this package are based on the plural rule handling defined +// in CLDR. See +// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules for +// details. 
+package plural + +import ( + "golang.org/x/text/internal/language/compact" + "golang.org/x/text/internal/number" + "golang.org/x/text/language" +) + +// Rules defines the plural rules for all languages for a certain plural type. +// +// This package is UNDER CONSTRUCTION and its API may change. +type Rules struct { + rules []pluralCheck + index []byte + langToIndex []byte + inclusionMasks []uint64 +} + +var ( + // Cardinal defines the plural rules for numbers indicating quantities. + Cardinal *Rules = cardinal + + // Ordinal defines the plural rules for numbers indicating position + // (first, second, etc.). + Ordinal *Rules = ordinal + + ordinal = &Rules{ + ordinalRules, + ordinalIndex, + ordinalLangToIndex, + ordinalInclusionMasks[:], + } + + cardinal = &Rules{ + cardinalRules, + cardinalIndex, + cardinalLangToIndex, + cardinalInclusionMasks[:], + } +) + +// getIntApprox converts the digits in slice digits[start:end] to an integer +// according to the following rules: +// - Let i be asInt(digits[start:end]), where out-of-range digits are assumed +// to be zero. +// - Result n is big if i / 10^nMod > 1. +// - Otherwise the result is i % 10^nMod. +// +// For example, if digits is {1, 2, 3} and start:end is 0:5, then the result +// for various values of nMod is: +// - when nMod == 2, n == big +// - when nMod == 3, n == big +// - when nMod == 4, n == big +// - when nMod == 5, n == 12300 +// - when nMod == 6, n == 12300 +// - when nMod == 7, n == 12300 +func getIntApprox(digits []byte, start, end, nMod, big int) (n int) { + // Leading 0 digits just result in 0. + p := start + if p < 0 { + p = 0 + } + // Range only over the part for which we have digits. + mid := end + if mid >= len(digits) { + mid = len(digits) + } + // Check digits more significant that nMod. 
+ if q := end - nMod; q > 0 { + if q > mid { + q = mid + } + for ; p < q; p++ { + if digits[p] != 0 { + return big + } + } + } + for ; p < mid; p++ { + n = 10*n + int(digits[p]) + } + // Multiply for trailing zeros. + for ; p < end; p++ { + n *= 10 + } + return n +} + +// MatchDigits computes the plural form for the given language and the given +// decimal floating point digits. The digits are stored in big-endian order and +// are of value byte(0) - byte(9). The floating point position is indicated by +// exp and the number of visible decimals is scale. All leading and trailing +// zeros may be omitted from digits. +// +// The following table contains examples of possible arguments to represent +// the given numbers. +// +// decimal digits exp scale +// 123 []byte{1, 2, 3} 3 0 +// 123.4 []byte{1, 2, 3, 4} 3 1 +// 123.40 []byte{1, 2, 3, 4} 3 2 +// 100000 []byte{1} 6 0 +// 100000.00 []byte{1} 6 3 +func (p *Rules) MatchDigits(t language.Tag, digits []byte, exp, scale int) Form { + index := tagToID(t) + + // Differentiate up to including mod 1000000 for the integer part. + n := getIntApprox(digits, 0, exp, 6, 1000000) + + // Differentiate up to including mod 100 for the fractional part. 
+ f := getIntApprox(digits, exp, exp+scale, 2, 100) + + return matchPlural(p, index, n, f, scale) +} + +func (p *Rules) matchDisplayDigits(t language.Tag, d *number.Digits) (Form, int) { + n := getIntApprox(d.Digits, 0, int(d.Exp), 6, 1000000) + return p.MatchDigits(t, d.Digits, int(d.Exp), d.NumFracDigits()), n +} + +func validForms(p *Rules, t language.Tag) (forms []Form) { + offset := p.langToIndex[tagToID(t)] + rules := p.rules[p.index[offset]:p.index[offset+1]] + + forms = append(forms, Other) + last := Other + for _, r := range rules { + if cat := Form(r.cat & formMask); cat != andNext && last != cat { + forms = append(forms, cat) + last = cat + } + } + return forms +} + +func (p *Rules) matchComponents(t language.Tag, n, f, scale int) Form { + return matchPlural(p, tagToID(t), n, f, scale) +} + +// MatchPlural returns the plural form for the given language and plural +// operands (as defined in +// https://unicode.org/reports/tr35/tr35-numbers.html#Language_Plural_Rules): +// +// where +// n absolute value of the source number (integer and decimals) +// input +// i integer digits of n. +// v number of visible fraction digits in n, with trailing zeros. +// w number of visible fraction digits in n, without trailing zeros. +// f visible fractional digits in n, with trailing zeros (f = t * 10^(v-w)) +// t visible fractional digits in n, without trailing zeros. +// +// If any of the operand values is too large to fit in an int, it is okay to +// pass the value modulo 10,000,000. +func (p *Rules) MatchPlural(lang language.Tag, i, v, w, f, t int) Form { + return matchPlural(p, tagToID(lang), i, f, v) +} + +func matchPlural(p *Rules, index compact.ID, n, f, v int) Form { + nMask := p.inclusionMasks[n%maxMod] + // Compute the fMask inline in the rules below, as it is relatively rare. 
+ // fMask := p.inclusionMasks[f%maxMod] + vMask := p.inclusionMasks[v%maxMod] + + // Do the matching + offset := p.langToIndex[index] + rules := p.rules[p.index[offset]:p.index[offset+1]] + for i := 0; i < len(rules); i++ { + rule := rules[i] + setBit := uint64(1 << rule.setID) + var skip bool + switch op := opID(rule.cat >> opShift); op { + case opI: // i = x + skip = n >= numN || nMask&setBit == 0 + + case opI | opNotEqual: // i != x + skip = n < numN && nMask&setBit != 0 + + case opI | opMod: // i % m = x + skip = nMask&setBit == 0 + + case opI | opMod | opNotEqual: // i % m != x + skip = nMask&setBit != 0 + + case opN: // n = x + skip = f != 0 || n >= numN || nMask&setBit == 0 + + case opN | opNotEqual: // n != x + skip = f == 0 && n < numN && nMask&setBit != 0 + + case opN | opMod: // n % m = x + skip = f != 0 || nMask&setBit == 0 + + case opN | opMod | opNotEqual: // n % m != x + skip = f == 0 && nMask&setBit != 0 + + case opF: // f = x + skip = f >= numN || p.inclusionMasks[f%maxMod]&setBit == 0 + + case opF | opNotEqual: // f != x + skip = f < numN && p.inclusionMasks[f%maxMod]&setBit != 0 + + case opF | opMod: // f % m = x + skip = p.inclusionMasks[f%maxMod]&setBit == 0 + + case opF | opMod | opNotEqual: // f % m != x + skip = p.inclusionMasks[f%maxMod]&setBit != 0 + + case opV: // v = x + skip = v < numN && vMask&setBit == 0 + + case opV | opNotEqual: // v != x + skip = v < numN && vMask&setBit != 0 + + case opW: // w == 0 + skip = f != 0 + + case opW | opNotEqual: // w != 0 + skip = f == 0 + + // Hard-wired rules that cannot be handled by our algorithm. + + case opBretonM: + skip = f != 0 || n == 0 || n%1000000 != 0 + + case opAzerbaijan00s: + // 100,200,300,400,500,600,700,800,900 + skip = n == 0 || n >= 1000 || n%100 != 0 + + case opItalian800: + skip = (f != 0 || n >= numN || nMask&setBit == 0) && n != 800 + } + if skip { + // advance over AND entries. 
+ for ; i < len(rules) && rules[i].cat&formMask == andNext; i++ { + } + continue + } + // return if we have a final entry. + if cat := rule.cat & formMask; cat != andNext { + return Form(cat) + } + } + return Other +} + +func tagToID(t language.Tag) compact.ID { + id, _ := compact.RegionalID(compact.Tag(t)) + return id +} diff --git a/vendor/golang.org/x/text/feature/plural/tables.go b/vendor/golang.org/x/text/feature/plural/tables.go new file mode 100644 index 00000000..b06b9cb4 --- /dev/null +++ b/vendor/golang.org/x/text/feature/plural/tables.go @@ -0,0 +1,552 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package plural + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +var ordinalRules = []pluralCheck{ // 64 elements + 0: {cat: 0x2f, setID: 0x4}, + 1: {cat: 0x3a, setID: 0x5}, + 2: {cat: 0x22, setID: 0x1}, + 3: {cat: 0x22, setID: 0x6}, + 4: {cat: 0x22, setID: 0x7}, + 5: {cat: 0x2f, setID: 0x8}, + 6: {cat: 0x3c, setID: 0x9}, + 7: {cat: 0x2f, setID: 0xa}, + 8: {cat: 0x3c, setID: 0xb}, + 9: {cat: 0x2c, setID: 0xc}, + 10: {cat: 0x24, setID: 0xd}, + 11: {cat: 0x2d, setID: 0xe}, + 12: {cat: 0x2d, setID: 0xf}, + 13: {cat: 0x2f, setID: 0x10}, + 14: {cat: 0x35, setID: 0x3}, + 15: {cat: 0xc5, setID: 0x11}, + 16: {cat: 0x2, setID: 0x1}, + 17: {cat: 0x5, setID: 0x3}, + 18: {cat: 0xd, setID: 0x12}, + 19: {cat: 0x22, setID: 0x1}, + 20: {cat: 0x2f, setID: 0x13}, + 21: {cat: 0x3d, setID: 0x14}, + 22: {cat: 0x2f, setID: 0x15}, + 23: {cat: 0x3a, setID: 0x16}, + 24: {cat: 0x2f, setID: 0x17}, + 25: {cat: 0x3b, setID: 0x18}, + 26: {cat: 0x2f, setID: 0xa}, + 27: {cat: 0x3c, setID: 0xb}, + 28: {cat: 0x22, setID: 0x1}, + 29: {cat: 0x23, setID: 0x19}, + 30: {cat: 0x24, setID: 0x1a}, + 31: {cat: 0x22, setID: 0x1b}, + 32: {cat: 0x23, setID: 0x2}, + 33: {cat: 0x24, setID: 0x1a}, + 34: {cat: 0xf, setID: 0x15}, + 35: {cat: 0x1a, setID: 0x16}, + 36: {cat: 0xf, setID: 0x17}, + 37: {cat: 
0x1b, setID: 0x18}, + 38: {cat: 0xf, setID: 0x1c}, + 39: {cat: 0x1d, setID: 0x1d}, + 40: {cat: 0xa, setID: 0x1e}, + 41: {cat: 0xa, setID: 0x1f}, + 42: {cat: 0xc, setID: 0x20}, + 43: {cat: 0xe4, setID: 0x0}, + 44: {cat: 0x5, setID: 0x3}, + 45: {cat: 0xd, setID: 0xe}, + 46: {cat: 0xd, setID: 0x21}, + 47: {cat: 0x22, setID: 0x1}, + 48: {cat: 0x23, setID: 0x19}, + 49: {cat: 0x24, setID: 0x1a}, + 50: {cat: 0x25, setID: 0x22}, + 51: {cat: 0x22, setID: 0x23}, + 52: {cat: 0x23, setID: 0x19}, + 53: {cat: 0x24, setID: 0x1a}, + 54: {cat: 0x25, setID: 0x22}, + 55: {cat: 0x22, setID: 0x24}, + 56: {cat: 0x23, setID: 0x19}, + 57: {cat: 0x24, setID: 0x1a}, + 58: {cat: 0x25, setID: 0x22}, + 59: {cat: 0x21, setID: 0x25}, + 60: {cat: 0x22, setID: 0x1}, + 61: {cat: 0x23, setID: 0x2}, + 62: {cat: 0x24, setID: 0x26}, + 63: {cat: 0x25, setID: 0x27}, +} // Size: 152 bytes + +var ordinalIndex = []uint8{ // 22 elements + 0x00, 0x00, 0x02, 0x03, 0x04, 0x05, 0x07, 0x09, + 0x0b, 0x0f, 0x10, 0x13, 0x16, 0x1c, 0x1f, 0x22, + 0x28, 0x2f, 0x33, 0x37, 0x3b, 0x40, +} // Size: 46 bytes + +var ordinalLangToIndex = []uint8{ // 775 elements + // Entry 0 - 3F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10, 0x10, + 0x10, 0x10, 0x10, 0x00, 0x00, 0x05, 0x05, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 40 - 7F + 0x12, 0x12, 0x12, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, + 0x0e, 0x0e, 0x0e, 0x0e, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x14, 0x14, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 80 - BF + 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + // Entry C0 - FF + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, + 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x0c, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 100 - 13F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, + 0x00, 0x00, 0x00, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x11, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x11, + 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x03, 0x03, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x00, 0x00, 0x00, 0x09, 0x09, 0x09, 0x09, + 0x09, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0a, 0x0a, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 1C0 - 1FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x0f, 0x0f, 0x00, 0x00, + 0x00, 0x00, 0x02, 0x0d, 0x0d, 0x02, 0x02, 0x02, + 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x13, 0x13, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 240 - 27F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 280 - 2BF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x0b, 0x0b, 0x0b, 0x0b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x07, 0x07, 0x02, 0x00, 0x00, 0x00, 0x00, + // Entry 2C0 - 2FF + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x06, 0x06, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x02, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0e, 0x0c, +} // Size: 799 bytes + +var ordinalInclusionMasks = []uint64{ // 100 elements + // Entry 0 - 1F + 0x0000002000010009, 0x00000018482000d3, 0x0000000042840195, 0x000000410a040581, + 0x00000041040c0081, 0x0000009840040041, 0x0000008400045001, 0x0000003850040001, + 0x0000003850060001, 0x0000003800049001, 0x0000000800052001, 0x0000000040660031, + 0x0000000041840331, 0x0000000100040f01, 0x00000001001c0001, 0x0000000040040001, + 0x0000000000045001, 0x0000000070040001, 0x0000000070040001, 0x0000000000049001, + 0x0000000080050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000000010001, 0x0000000040200011, + // Entry 20 - 3F + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, + 0x0000000200050001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + // Entry 40 - 5F + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000080010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 
0x0000000000009001, + 0x0000000080070001, 0x0000000040200011, 0x0000000040800111, 0x0000000100000501, + 0x0000000100080001, 0x0000000040000001, 0x0000000000005001, 0x0000000050000001, + 0x0000000050000001, 0x0000000000009001, 0x0000000200010001, 0x0000000040200011, + 0x0000000040800111, 0x0000000100000501, 0x0000000100080001, 0x0000000040000001, + // Entry 60 - 7F + 0x0000000000005001, 0x0000000050000001, 0x0000000050000001, 0x0000000000009001, +} // Size: 824 bytes + +// Slots used for ordinal: 40 of 0xFF rules; 16 of 0xFF indexes; 40 of 64 sets + +var cardinalRules = []pluralCheck{ // 166 elements + 0: {cat: 0x2, setID: 0x3}, + 1: {cat: 0x22, setID: 0x1}, + 2: {cat: 0x2, setID: 0x4}, + 3: {cat: 0x2, setID: 0x4}, + 4: {cat: 0x7, setID: 0x1}, + 5: {cat: 0x62, setID: 0x3}, + 6: {cat: 0x22, setID: 0x4}, + 7: {cat: 0x7, setID: 0x3}, + 8: {cat: 0x42, setID: 0x1}, + 9: {cat: 0x22, setID: 0x4}, + 10: {cat: 0x22, setID: 0x4}, + 11: {cat: 0x22, setID: 0x5}, + 12: {cat: 0x22, setID: 0x1}, + 13: {cat: 0x22, setID: 0x1}, + 14: {cat: 0x7, setID: 0x4}, + 15: {cat: 0x92, setID: 0x3}, + 16: {cat: 0xf, setID: 0x6}, + 17: {cat: 0x1f, setID: 0x7}, + 18: {cat: 0x82, setID: 0x3}, + 19: {cat: 0x92, setID: 0x3}, + 20: {cat: 0xf, setID: 0x6}, + 21: {cat: 0x62, setID: 0x3}, + 22: {cat: 0x4a, setID: 0x6}, + 23: {cat: 0x7, setID: 0x8}, + 24: {cat: 0x62, setID: 0x3}, + 25: {cat: 0x1f, setID: 0x9}, + 26: {cat: 0x62, setID: 0x3}, + 27: {cat: 0x5f, setID: 0x9}, + 28: {cat: 0x72, setID: 0x3}, + 29: {cat: 0x29, setID: 0xa}, + 30: {cat: 0x29, setID: 0xb}, + 31: {cat: 0x4f, setID: 0xb}, + 32: {cat: 0x61, setID: 0x2}, + 33: {cat: 0x2f, setID: 0x6}, + 34: {cat: 0x3a, setID: 0x7}, + 35: {cat: 0x4f, setID: 0x6}, + 36: {cat: 0x5f, setID: 0x7}, + 37: {cat: 0x62, setID: 0x2}, + 38: {cat: 0x4f, setID: 0x6}, + 39: {cat: 0x72, setID: 0x2}, + 40: {cat: 0x21, setID: 0x3}, + 41: {cat: 0x7, setID: 0x4}, + 42: {cat: 0x32, setID: 0x3}, + 43: {cat: 0x21, setID: 0x3}, + 44: {cat: 0x22, setID: 0x1}, + 45: {cat: 0x22, 
setID: 0x1}, + 46: {cat: 0x23, setID: 0x2}, + 47: {cat: 0x2, setID: 0x3}, + 48: {cat: 0x22, setID: 0x1}, + 49: {cat: 0x24, setID: 0xc}, + 50: {cat: 0x7, setID: 0x1}, + 51: {cat: 0x62, setID: 0x3}, + 52: {cat: 0x74, setID: 0x3}, + 53: {cat: 0x24, setID: 0x3}, + 54: {cat: 0x2f, setID: 0xd}, + 55: {cat: 0x34, setID: 0x1}, + 56: {cat: 0xf, setID: 0x6}, + 57: {cat: 0x1f, setID: 0x7}, + 58: {cat: 0x62, setID: 0x3}, + 59: {cat: 0x4f, setID: 0x6}, + 60: {cat: 0x5a, setID: 0x7}, + 61: {cat: 0xf, setID: 0xe}, + 62: {cat: 0x1f, setID: 0xf}, + 63: {cat: 0x64, setID: 0x3}, + 64: {cat: 0x4f, setID: 0xe}, + 65: {cat: 0x5c, setID: 0xf}, + 66: {cat: 0x22, setID: 0x10}, + 67: {cat: 0x23, setID: 0x11}, + 68: {cat: 0x24, setID: 0x12}, + 69: {cat: 0xf, setID: 0x1}, + 70: {cat: 0x62, setID: 0x3}, + 71: {cat: 0xf, setID: 0x2}, + 72: {cat: 0x63, setID: 0x3}, + 73: {cat: 0xf, setID: 0x13}, + 74: {cat: 0x64, setID: 0x3}, + 75: {cat: 0x74, setID: 0x3}, + 76: {cat: 0xf, setID: 0x1}, + 77: {cat: 0x62, setID: 0x3}, + 78: {cat: 0x4a, setID: 0x1}, + 79: {cat: 0xf, setID: 0x2}, + 80: {cat: 0x63, setID: 0x3}, + 81: {cat: 0x4b, setID: 0x2}, + 82: {cat: 0xf, setID: 0x13}, + 83: {cat: 0x64, setID: 0x3}, + 84: {cat: 0x4c, setID: 0x13}, + 85: {cat: 0x7, setID: 0x1}, + 86: {cat: 0x62, setID: 0x3}, + 87: {cat: 0x7, setID: 0x2}, + 88: {cat: 0x63, setID: 0x3}, + 89: {cat: 0x2f, setID: 0xa}, + 90: {cat: 0x37, setID: 0x14}, + 91: {cat: 0x65, setID: 0x3}, + 92: {cat: 0x7, setID: 0x1}, + 93: {cat: 0x62, setID: 0x3}, + 94: {cat: 0x7, setID: 0x15}, + 95: {cat: 0x64, setID: 0x3}, + 96: {cat: 0x75, setID: 0x3}, + 97: {cat: 0x7, setID: 0x1}, + 98: {cat: 0x62, setID: 0x3}, + 99: {cat: 0xf, setID: 0xe}, + 100: {cat: 0x1f, setID: 0xf}, + 101: {cat: 0x64, setID: 0x3}, + 102: {cat: 0xf, setID: 0x16}, + 103: {cat: 0x17, setID: 0x1}, + 104: {cat: 0x65, setID: 0x3}, + 105: {cat: 0xf, setID: 0x17}, + 106: {cat: 0x65, setID: 0x3}, + 107: {cat: 0xf, setID: 0xf}, + 108: {cat: 0x65, setID: 0x3}, + 109: {cat: 0x2f, setID: 0x6}, + 
110: {cat: 0x3a, setID: 0x7}, + 111: {cat: 0x2f, setID: 0xe}, + 112: {cat: 0x3c, setID: 0xf}, + 113: {cat: 0x2d, setID: 0xa}, + 114: {cat: 0x2d, setID: 0x17}, + 115: {cat: 0x2d, setID: 0x18}, + 116: {cat: 0x2f, setID: 0x6}, + 117: {cat: 0x3a, setID: 0xb}, + 118: {cat: 0x2f, setID: 0x19}, + 119: {cat: 0x3c, setID: 0xb}, + 120: {cat: 0x55, setID: 0x3}, + 121: {cat: 0x22, setID: 0x1}, + 122: {cat: 0x24, setID: 0x3}, + 123: {cat: 0x2c, setID: 0xc}, + 124: {cat: 0x2d, setID: 0xb}, + 125: {cat: 0xf, setID: 0x6}, + 126: {cat: 0x1f, setID: 0x7}, + 127: {cat: 0x62, setID: 0x3}, + 128: {cat: 0xf, setID: 0xe}, + 129: {cat: 0x1f, setID: 0xf}, + 130: {cat: 0x64, setID: 0x3}, + 131: {cat: 0xf, setID: 0xa}, + 132: {cat: 0x65, setID: 0x3}, + 133: {cat: 0xf, setID: 0x17}, + 134: {cat: 0x65, setID: 0x3}, + 135: {cat: 0xf, setID: 0x18}, + 136: {cat: 0x65, setID: 0x3}, + 137: {cat: 0x2f, setID: 0x6}, + 138: {cat: 0x3a, setID: 0x1a}, + 139: {cat: 0x2f, setID: 0x1b}, + 140: {cat: 0x3b, setID: 0x1c}, + 141: {cat: 0x2f, setID: 0x1d}, + 142: {cat: 0x3c, setID: 0x1e}, + 143: {cat: 0x37, setID: 0x3}, + 144: {cat: 0xa5, setID: 0x0}, + 145: {cat: 0x22, setID: 0x1}, + 146: {cat: 0x23, setID: 0x2}, + 147: {cat: 0x24, setID: 0x1f}, + 148: {cat: 0x25, setID: 0x20}, + 149: {cat: 0xf, setID: 0x6}, + 150: {cat: 0x62, setID: 0x3}, + 151: {cat: 0xf, setID: 0x1b}, + 152: {cat: 0x63, setID: 0x3}, + 153: {cat: 0xf, setID: 0x21}, + 154: {cat: 0x64, setID: 0x3}, + 155: {cat: 0x75, setID: 0x3}, + 156: {cat: 0x21, setID: 0x3}, + 157: {cat: 0x22, setID: 0x1}, + 158: {cat: 0x23, setID: 0x2}, + 159: {cat: 0x2c, setID: 0x22}, + 160: {cat: 0x2d, setID: 0x5}, + 161: {cat: 0x21, setID: 0x3}, + 162: {cat: 0x22, setID: 0x1}, + 163: {cat: 0x23, setID: 0x2}, + 164: {cat: 0x24, setID: 0x23}, + 165: {cat: 0x25, setID: 0x24}, +} // Size: 356 bytes + +var cardinalIndex = []uint8{ // 36 elements + 0x00, 0x00, 0x02, 0x03, 0x04, 0x06, 0x09, 0x0a, + 0x0c, 0x0d, 0x10, 0x14, 0x17, 0x1d, 0x28, 0x2b, + 0x2d, 0x2f, 0x32, 0x38, 0x42, 
0x45, 0x4c, 0x55, + 0x5c, 0x61, 0x6d, 0x74, 0x79, 0x7d, 0x89, 0x91, + 0x95, 0x9c, 0xa1, 0xa6, +} // Size: 60 bytes + +var cardinalLangToIndex = []uint8{ // 775 elements + // Entry 0 - 3F + 0x00, 0x08, 0x08, 0x08, 0x00, 0x00, 0x06, 0x06, + 0x01, 0x01, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, 0x21, + 0x01, 0x01, 0x08, 0x08, 0x04, 0x04, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x1a, 0x1a, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x00, 0x00, + // Entry 40 - 7F + 0x01, 0x01, 0x01, 0x00, 0x00, 0x00, 0x1e, 0x1e, + 0x08, 0x08, 0x13, 0x13, 0x13, 0x13, 0x13, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x00, 0x00, 0x00, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x18, 0x18, 0x00, 0x00, 0x22, 0x22, 0x09, 0x09, + 0x09, 0x00, 0x00, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x00, 0x00, 0x16, 0x16, 0x00, + 0x00, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 80 - BF + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry C0 - FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + // Entry 100 - 13F + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x04, 0x04, + 0x08, 0x08, 0x00, 0x00, 0x01, 
0x01, 0x01, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x04, 0x04, 0x0c, 0x0c, + 0x08, 0x08, 0x08, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x08, 0x08, 0x04, 0x04, 0x1f, 0x1f, + 0x14, 0x14, 0x04, 0x04, 0x08, 0x08, 0x08, 0x08, + 0x01, 0x01, 0x06, 0x00, 0x00, 0x20, 0x20, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x17, 0x17, 0x01, + 0x01, 0x13, 0x13, 0x13, 0x16, 0x16, 0x08, 0x08, + 0x02, 0x02, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 180 - 1BF + 0x00, 0x04, 0x0a, 0x0a, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x10, 0x17, 0x00, 0x00, 0x00, 0x08, 0x08, + 0x04, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x02, + 0x02, 0x08, 0x00, 0x00, 0x08, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x01, + 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08, 0x08, + 0x08, 0x08, 0x00, 0x00, 0x0f, 0x0f, 0x08, 0x10, + // Entry 1C0 - 1FF + 0x10, 0x08, 0x08, 0x0e, 0x0e, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x06, 0x06, 0x06, 0x06, 0x06, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x1b, 0x1b, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x0d, 0x0d, 0x08, + 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, + 0x00, 0x00, 0x08, 0x08, 0x0b, 0x0b, 0x08, 0x08, + 0x08, 0x08, 0x12, 0x01, 0x01, 0x00, 0x00, 0x00, + 0x00, 0x1c, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 200 - 23F + 0x00, 0x08, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x00, 0x08, 0x08, 0x08, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x00, + 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, 0x08, + 0x06, 0x00, 0x00, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x06, 0x06, + 0x06, 0x06, 0x06, 0x08, 0x19, 0x19, 0x0d, 0x0d, + 0x08, 0x08, 0x03, 0x04, 0x03, 0x04, 0x04, 0x04, + // Entry 240 - 27F + 0x04, 0x04, 0x04, 0x04, 
0x04, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x08, 0x08, 0x00, 0x00, 0x12, + 0x12, 0x12, 0x08, 0x08, 0x1d, 0x1d, 0x1d, 0x1d, + 0x1d, 0x1d, 0x1d, 0x00, 0x00, 0x08, 0x08, 0x00, + 0x00, 0x08, 0x08, 0x00, 0x00, 0x08, 0x08, 0x08, + 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x00, 0x00, + 0x00, 0x00, 0x13, 0x11, 0x11, 0x11, 0x11, 0x11, + 0x05, 0x05, 0x18, 0x18, 0x15, 0x15, 0x10, 0x10, + // Entry 280 - 2BF + 0x10, 0x10, 0x10, 0x10, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x13, + 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, 0x13, + 0x13, 0x13, 0x08, 0x08, 0x08, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x08, 0x08, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, + 0x08, 0x00, 0x00, 0x00, 0x00, 0x06, 0x06, 0x06, + 0x08, 0x08, 0x08, 0x0c, 0x08, 0x00, 0x00, 0x08, + // Entry 2C0 - 2FF + 0x08, 0x08, 0x08, 0x00, 0x00, 0x00, 0x00, 0x07, + 0x07, 0x08, 0x08, 0x1d, 0x1d, 0x04, 0x04, 0x04, + 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x08, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x08, 0x00, 0x00, 0x08, + 0x08, 0x08, 0x08, 0x06, 0x08, 0x08, 0x00, 0x00, + 0x08, 0x08, 0x08, 0x00, 0x00, 0x04, 0x04, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, + // Entry 300 - 33F + 0x00, 0x00, 0x00, 0x01, 0x01, 0x04, 0x04, +} // Size: 799 bytes + +var cardinalInclusionMasks = []uint64{ // 100 elements + // Entry 0 - 1F + 0x0000000200500419, 0x0000000000512153, 0x000000000a327105, 0x0000000ca23c7101, + 0x00000004a23c7201, 0x0000000482943001, 0x0000001482943201, 0x0000000502943001, + 0x0000000502943001, 0x0000000522943201, 0x0000000540543401, 0x00000000454128e1, + 0x000000005b02e821, 0x000000006304e821, 0x000000006304ea21, 0x0000000042842821, + 0x0000000042842a21, 0x0000000042842821, 0x0000000042842821, 0x0000000062842a21, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 
0x0000000000400421, 0x0000000000400061, + // Entry 20 - 3F + 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021, + 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000000400421, 0x0000000000400061, + 0x000000000a004021, 0x0000000022004021, 0x0000000022004221, 0x0000000002800021, + 0x0000000002800221, 0x0000000002800021, 0x0000000002800021, 0x0000000022800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + // Entry 40 - 5F + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061, + 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021, + 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221, + 0x0000000200400421, 0x0000000000400061, 0x000000000a004021, 0x0000000022004021, + 0x0000000022004221, 0x0000000002800021, 0x0000000002800221, 0x0000000002800021, + 0x0000000002800021, 0x0000000022800221, 0x0000000040400421, 0x0000000044400061, + 0x000000005a004021, 0x0000000062004021, 0x0000000062004221, 0x0000000042800021, + // Entry 60 - 7F + 0x0000000042800221, 0x0000000042800021, 0x0000000042800021, 0x0000000062800221, +} // Size: 824 bytes + +// Slots used for cardinal: A6 of 0xFF rules; 24 of 0xFF indexes; 37 of 64 sets + +// Total table size 3860 bytes (3KiB); checksum: AAFBF21 diff --git a/vendor/golang.org/x/text/internal/catmsg/catmsg.go b/vendor/golang.org/x/text/internal/catmsg/catmsg.go new file mode 100644 index 00000000..1b257a7b --- /dev/null +++ b/vendor/golang.org/x/text/internal/catmsg/catmsg.go @@ -0,0 +1,417 @@ +// Copyright 2017 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package catmsg contains support types for package x/text/message/catalog. +// +// This package contains the low-level implementations of Message used by the +// catalog package and provides primitives for other packages to implement their +// own. For instance, the plural package provides functionality for selecting +// translation strings based on the plural category of substitution arguments. +// +// # Encoding and Decoding +// +// Catalogs store Messages encoded as a single string. Compiling a message into +// a string both results in compacter representation and speeds up evaluation. +// +// A Message must implement a Compile method to convert its arbitrary +// representation to a string. The Compile method takes an Encoder which +// facilitates serializing the message. Encoders also provide more context of +// the messages's creation (such as for which language the message is intended), +// which may not be known at the time of the creation of the message. +// +// Each message type must also have an accompanying decoder registered to decode +// the message. This decoder takes a Decoder argument which provides the +// counterparts for the decoding. +// +// # Renderers +// +// A Decoder must be initialized with a Renderer implementation. These +// implementations must be provided by packages that use Catalogs, typically +// formatting packages such as x/text/message. A typical user will not need to +// worry about this type; it is only relevant to packages that do string +// formatting and want to use the catalog package to handle localized strings. +// +// A package that uses catalogs for selecting strings receives selection results +// as sequence of substrings passed to the Renderer. The following snippet shows +// how to express the above example using the message package. 
+// +// message.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", plural.Select(1, "one", "minute")), +// catalog.String("You are %[1]d ${minutes} late.")) +// +// p := message.NewPrinter(language.English) +// p.Printf("You are %d minute(s) late.", 5) // always 5 minutes late. +// +// To evaluate the Printf, package message wraps the arguments in a Renderer +// that is passed to the catalog for message decoding. The call sequence that +// results from evaluating the above message, assuming the person is rather +// tardy, is: +// +// Render("You are %[1]d ") +// Arg(1) +// Render("minutes") +// Render(" late.") +// +// The calls to Arg is caused by the plural.Select execution, which evaluates +// the argument to determine whether the singular or plural message form should +// be selected. The calls to Render reports the partial results to the message +// package for further evaluation. +package catmsg + +import ( + "errors" + "fmt" + "strconv" + "strings" + "sync" + + "golang.org/x/text/language" +) + +// A Handle refers to a registered message type. +type Handle int + +// A Handler decodes and evaluates data compiled by a Message and sends the +// result to the Decoder. The output may depend on the value of the substitution +// arguments, accessible by the Decoder's Arg method. The Handler returns false +// if there is no translation for the given substitution arguments. +type Handler func(d *Decoder) bool + +// Register records the existence of a message type and returns a Handle that +// can be used in the Encoder's EncodeMessageType method to create such +// messages. The prefix of the name should be the package path followed by +// an optional disambiguating string. +// Register will panic if a handle for the same name was already registered. 
+func Register(name string, handler Handler) Handle { + mutex.Lock() + defer mutex.Unlock() + + if _, ok := names[name]; ok { + panic(fmt.Errorf("catmsg: handler for %q already exists", name)) + } + h := Handle(len(handlers)) + names[name] = h + handlers = append(handlers, handler) + return h +} + +// These handlers require fixed positions in the handlers slice. +const ( + msgVars Handle = iota + msgFirst + msgRaw + msgString + msgAffix + // Leave some arbitrary room for future expansion: 20 should suffice. + numInternal = 20 +) + +const prefix = "golang.org/x/text/internal/catmsg." + +var ( + // TODO: find a more stable way to link handles to message types. + mutex sync.Mutex + names = map[string]Handle{ + prefix + "Vars": msgVars, + prefix + "First": msgFirst, + prefix + "Raw": msgRaw, + prefix + "String": msgString, + prefix + "Affix": msgAffix, + } + handlers = make([]Handler, numInternal) +) + +func init() { + // This handler is a message type wrapper that initializes a decoder + // with a variable block. This message type, if present, is always at the + // start of an encoded message. + handlers[msgVars] = func(d *Decoder) bool { + blockSize := int(d.DecodeUint()) + d.vars = d.data[:blockSize] + d.data = d.data[blockSize:] + return d.executeMessage() + } + + // First takes the first message in a sequence that results in a match for + // the given substitution arguments. + handlers[msgFirst] = func(d *Decoder) bool { + for !d.Done() { + if d.ExecuteMessage() { + return true + } + } + return false + } + + handlers[msgRaw] = func(d *Decoder) bool { + d.Render(d.data) + return true + } + + // A String message alternates between a string constant and a variable + // substitution. 
+ handlers[msgString] = func(d *Decoder) bool { + for !d.Done() { + if str := d.DecodeString(); str != "" { + d.Render(str) + } + if d.Done() { + break + } + d.ExecuteSubstitution() + } + return true + } + + handlers[msgAffix] = func(d *Decoder) bool { + // TODO: use an alternative method for common cases. + prefix := d.DecodeString() + suffix := d.DecodeString() + if prefix != "" { + d.Render(prefix) + } + ret := d.ExecuteMessage() + if suffix != "" { + d.Render(suffix) + } + return ret + } +} + +var ( + // ErrIncomplete indicates a compiled message does not define translations + // for all possible argument values. If this message is returned, evaluating + // a message may result in the ErrNoMatch error. + ErrIncomplete = errors.New("catmsg: incomplete message; may not give result for all inputs") + + // ErrNoMatch indicates no translation message matched the given input + // parameters when evaluating a message. + ErrNoMatch = errors.New("catmsg: no translation for inputs") +) + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. +type Message interface { + // Compile encodes the format string(s) of the message as a string for later + // evaluation. + // + // The first call Compile makes on the encoder must be EncodeMessageType. + // The handle passed to this call may either be a handle returned by + // Register to encode a single custom message, or HandleFirst followed by + // a sequence of calls to EncodeMessage. + // + // Compile must return ErrIncomplete if it is possible for evaluation to + // not match any translation for a given set of formatting parameters. + // For example, selecting a translation based on plural form may not yield + // a match if the form "Other" is not one of the selectors. + // + // Compile may return any other application-specific error. 
For backwards
+	// compatibility with packages like fmt, which often do not do sanity
+	// checking of format strings ahead of time, Compile should still make an
+	// effort to have some sensible fallback in case of an error.
+	Compile(e *Encoder) error
+}
+
+// Compile converts a Message to a data string that can be stored in a Catalog.
+// The resulting string can subsequently be decoded by passing to the Execute
+// method of a Decoder.
+func Compile(tag language.Tag, macros Dictionary, m Message) (data string, err error) {
+	// TODO: pass macros so they can be used for validation.
+	v := &Encoder{inBody: true} // encoder for variables
+	v.root = v
+	e := &Encoder{root: v, parent: v, tag: tag} // encoder for messages
+	err = m.Compile(e)
+	// This package serves the message package, which in turn is meant to be a
+	// drop-in replacement for fmt. With the fmt package, format strings are
+	// evaluated lazily and errors are handled by substituting strings in the
+	// result, rather than returning an error. Dealing with multiple languages
+	// makes it more important to check errors ahead of time. We chose to be
+	// consistent and compatible and allow graceful degradation in case of
+	// errors.
+	buf := e.buf[stripPrefix(e.buf):]
+	if len(v.buf) > 0 {
+		// Prepend variable block.
+		b := make([]byte, 1+maxVarintBytes+len(v.buf)+len(buf))
+		b[0] = byte(msgVars)
+		b = b[:1+encodeUint(b[1:], uint64(len(v.buf)))]
+		b = append(b, v.buf...)
+		b = append(b, buf...)
+		buf = b
+	}
+	if err == nil {
+		err = v.err
+	}
+	return string(buf), err
+}
+
+// FirstOf is a message type that prints the first message in the sequence that
+// resolves to a match for the given substitution arguments.
+type FirstOf []Message
+
+// Compile implements Message.
+func (s FirstOf) Compile(e *Encoder) error { + e.EncodeMessageType(msgFirst) + err := ErrIncomplete + for i, m := range s { + if err == nil { + return fmt.Errorf("catalog: message argument %d is complete and blocks subsequent messages", i-1) + } + err = e.EncodeMessage(m) + } + return err +} + +// Var defines a message that can be substituted for a placeholder of the same +// name. If an expression does not result in a string after evaluation, Name is +// used as the substitution. For example: +// +// Var{ +// Name: "minutes", +// Message: plural.Select(1, "one", "minute"), +// } +// +// will resolve to minute for singular and minutes for plural forms. +type Var struct { + Name string + Message Message +} + +var errIsVar = errors.New("catmsg: variable used as message") + +// Compile implements Message. +// +// Note that this method merely registers a variable; it does not create an +// encoded message. +func (v *Var) Compile(e *Encoder) error { + if err := e.addVar(v.Name, v.Message); err != nil { + return err + } + // Using a Var by itself is an error. If it is in a sequence followed by + // other messages referring to it, this error will be ignored. + return errIsVar +} + +// Raw is a message consisting of a single format string that is passed as is +// to the Renderer. +// +// Note that a Renderer may still do its own variable substitution. +type Raw string + +// Compile implements Message. +func (r Raw) Compile(e *Encoder) (err error) { + e.EncodeMessageType(msgRaw) + // Special case: raw strings don't have a size encoding and so don't use + // EncodeString. + e.buf = append(e.buf, r...) + return nil +} + +// String is a message consisting of a single format string which contains +// placeholders that may be substituted with variables. +// +// Variable substitutions are marked with placeholders and a variable name of +// the form ${name}. Any other substitutions such as Go templates or +// printf-style substitutions are left to be done by the Renderer. 
+//
+// When evaluating a string interpolation, a Renderer will receive separate
+// calls for each placeholder and interstitial string. For example, for the
+// message: "%[1]v ${invites} %[2]v to ${their} party." The sequence of calls
+// is:
+//
+//	d.Render("%[1]v ")
+//	d.Arg(1)
+//	d.Render(resultOfInvites)
+//	d.Render(" %[2]v to ")
+//	d.Arg(2)
+//	d.Render(resultOfTheir)
+//	d.Render(" party.")
+//
+// where the messages for "invites" and "their" both use a plural.Select
+// referring to the first argument.
+//
+// Strings may also invoke macros. Macros are essentially variables that can be
+// reused. Macros may, for instance, be used to make selections between
+// different conjugations of a verb. See the catalog package description for an
+// overview of macros.
+type String string
+
+// Compile implements Message. It parses the placeholder formats and returns
+// any error.
+func (s String) Compile(e *Encoder) (err error) {
+	msg := string(s)
+	const subStart = "${"
+	hasHeader := false
+	p := 0
+	b := []byte{}
+	for {
+		i := strings.Index(msg[p:], subStart)
+		if i == -1 {
+			break
+		}
+		b = append(b, msg[p:p+i]...)
+		p += i + len(subStart)
+		if i = strings.IndexByte(msg[p:], '}'); i == -1 {
+			b = append(b, "$!(MISSINGBRACE)"...)
+			err = fmt.Errorf("catmsg: missing '}'")
+			p = len(msg)
+			break
+		}
+		name := strings.TrimSpace(msg[p : p+i])
+		if q := strings.IndexByte(name, '('); q == -1 {
+			if !hasHeader {
+				hasHeader = true
+				e.EncodeMessageType(msgString)
+			}
+			e.EncodeString(string(b))
+			e.EncodeSubstitution(name)
+			b = b[:0]
+		} else if j := strings.IndexByte(name[q:], ')'); j == -1 {
+			// TODO: what should the error be?
+			b = append(b, "$!(MISSINGPAREN)"...)
+			err = fmt.Errorf("catmsg: missing ')'")
+		} else if x, sErr := strconv.ParseUint(strings.TrimSpace(name[q+1:q+j]), 10, 32); sErr != nil {
+			// TODO: handle more than one argument
+			b = append(b, "$!(BADNUM)"...)
+			err = fmt.Errorf("catmsg: invalid number %q", strings.TrimSpace(name[q+1:q+j]))
+		} else {
+			if !hasHeader {
+				hasHeader = true
+				e.EncodeMessageType(msgString)
+			}
+			e.EncodeString(string(b))
+			e.EncodeSubstitution(name[:q], int(x))
+			b = b[:0]
+		}
+		p += i + 1
+	}
+	b = append(b, msg[p:]...)
+	if !hasHeader {
+		// Simplify string to a raw string.
+		Raw(string(b)).Compile(e)
+	} else if len(b) > 0 {
+		e.EncodeString(string(b))
+	}
+	return err
+}
+
+// Affix is a message that adds a prefix and suffix to another message.
+// This is mostly used to add back whitespace to a translation that was
+// stripped before sending it out.
+type Affix struct {
+	Message Message
+	Prefix  string
+	Suffix  string
+}
+
+// Compile implements Message.
+func (a Affix) Compile(e *Encoder) (err error) {
+	// TODO: consider adding a special message type that just adds a single
+	// return. This is probably common enough to handle the majority of cases.
+	// Get some stats first, though.
+	e.EncodeMessageType(msgAffix)
+	e.EncodeString(a.Prefix)
+	e.EncodeString(a.Suffix)
+	e.EncodeMessage(a.Message)
+	return nil
+}
diff --git a/vendor/golang.org/x/text/internal/catmsg/codec.go b/vendor/golang.org/x/text/internal/catmsg/codec.go
new file mode 100644
index 00000000..547802b0
--- /dev/null
+++ b/vendor/golang.org/x/text/internal/catmsg/codec.go
@@ -0,0 +1,407 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package catmsg
+
+import (
+	"errors"
+	"fmt"
+
+	"golang.org/x/text/language"
+)
+
+// A Renderer renders a Message.
+type Renderer interface {
+	// Render renders the given string. The given string may be interpreted as a
+	// format string, such as the one used by the fmt package or a template.
+	Render(s string)
+
+	// Arg returns the i-th argument passed to format a message. This method
+	// should return nil if there is no such argument.
Messages need access to + // arguments to allow selecting a message based on linguistic features of + // those arguments. + Arg(i int) interface{} +} + +// A Dictionary specifies a source of messages, including variables or macros. +type Dictionary interface { + // Lookup returns the message for the given key. It returns false for ok if + // such a message could not be found. + Lookup(key string) (data string, ok bool) + + // TODO: consider returning an interface, instead of a string. This will + // allow implementations to do their own message type decoding. +} + +// An Encoder serializes a Message to a string. +type Encoder struct { + // The root encoder is used for storing encoded variables. + root *Encoder + // The parent encoder provides the surrounding scopes for resolving variable + // names. + parent *Encoder + + tag language.Tag + + // buf holds the encoded message so far. After a message completes encoding, + // the contents of buf, prefixed by the encoded length, are flushed to the + // parent buffer. + buf []byte + + // vars is the lookup table of variables in the current scope. + vars []keyVal + + err error + inBody bool // if false next call must be EncodeMessageType +} + +type keyVal struct { + key string + offset int +} + +// Language reports the language for which the encoded message will be stored +// in the Catalog. +func (e *Encoder) Language() language.Tag { return e.tag } + +func (e *Encoder) setError(err error) { + if e.root.err == nil { + e.root.err = err + } +} + +// EncodeUint encodes x. +func (e *Encoder) EncodeUint(x uint64) { + e.checkInBody() + var buf [maxVarintBytes]byte + n := encodeUint(buf[:], x) + e.buf = append(e.buf, buf[:n]...) +} + +// EncodeString encodes s. +func (e *Encoder) EncodeString(s string) { + e.checkInBody() + e.EncodeUint(uint64(len(s))) + e.buf = append(e.buf, s...) +} + +// EncodeMessageType marks the current message to be of type h. +// +// It must be the first call of a Message's Compile method. 
+func (e *Encoder) EncodeMessageType(h Handle) { + if e.inBody { + panic("catmsg: EncodeMessageType not the first method called") + } + e.inBody = true + e.EncodeUint(uint64(h)) +} + +// EncodeMessage serializes the given message inline at the current position. +func (e *Encoder) EncodeMessage(m Message) error { + e = &Encoder{root: e.root, parent: e, tag: e.tag} + err := m.Compile(e) + if _, ok := m.(*Var); !ok { + e.flushTo(e.parent) + } + return err +} + +func (e *Encoder) checkInBody() { + if !e.inBody { + panic("catmsg: expected prior call to EncodeMessageType") + } +} + +// stripPrefix indicates the number of prefix bytes that must be stripped to +// turn a single-element sequence into a message that is just this single member +// without its size prefix. If the message can be stripped, b[1:n] contains the +// size prefix. +func stripPrefix(b []byte) (n int) { + if len(b) > 0 && Handle(b[0]) == msgFirst { + x, n, _ := decodeUint(b[1:]) + if 1+n+int(x) == len(b) { + return 1 + n + } + } + return 0 +} + +func (e *Encoder) flushTo(dst *Encoder) { + data := e.buf + p := stripPrefix(data) + if p > 0 { + data = data[1:] + } else { + // Prefix the size. + dst.EncodeUint(uint64(len(data))) + } + dst.buf = append(dst.buf, data...) +} + +func (e *Encoder) addVar(key string, m Message) error { + for _, v := range e.parent.vars { + if v.key == key { + err := fmt.Errorf("catmsg: duplicate variable %q", key) + e.setError(err) + return err + } + } + scope := e.parent + // If a variable message is Incomplete, and does not evaluate to a message + // during execution, we fall back to the variable name. We encode this by + // appending the variable name if the message reports it's incomplete. + + err := m.Compile(e) + if err != ErrIncomplete { + e.setError(err) + } + switch { + case len(e.buf) == 1 && Handle(e.buf[0]) == msgFirst: // empty sequence + e.buf = e.buf[:0] + e.inBody = false + fallthrough + case len(e.buf) == 0: + // Empty message. 
+ if err := String(key).Compile(e); err != nil { + e.setError(err) + } + case err == ErrIncomplete: + if Handle(e.buf[0]) != msgFirst { + seq := &Encoder{root: e.root, parent: e} + seq.EncodeMessageType(msgFirst) + e.flushTo(seq) + e = seq + } + // e contains a sequence; append the fallback string. + e.EncodeMessage(String(key)) + } + + // Flush result to variable heap. + offset := len(e.root.buf) + e.flushTo(e.root) + e.buf = e.buf[:0] + + // Record variable offset in current scope. + scope.vars = append(scope.vars, keyVal{key: key, offset: offset}) + return err +} + +const ( + substituteVar = iota + substituteMacro + substituteError +) + +// EncodeSubstitution inserts a resolved reference to a variable or macro. +// +// This call must be matched with a call to ExecuteSubstitution at decoding +// time. +func (e *Encoder) EncodeSubstitution(name string, arguments ...int) { + if arity := len(arguments); arity > 0 { + // TODO: also resolve macros. + e.EncodeUint(substituteMacro) + e.EncodeString(name) + for _, a := range arguments { + e.EncodeUint(uint64(a)) + } + return + } + for scope := e; scope != nil; scope = scope.parent { + for _, v := range scope.vars { + if v.key != name { + continue + } + e.EncodeUint(substituteVar) // TODO: support arity > 0 + e.EncodeUint(uint64(v.offset)) + return + } + } + // TODO: refer to dictionary-wide scoped variables. + e.EncodeUint(substituteError) + e.EncodeString(name) + e.setError(fmt.Errorf("catmsg: unknown var %q", name)) +} + +// A Decoder deserializes and evaluates messages that are encoded by an encoder. +type Decoder struct { + tag language.Tag + dst Renderer + macros Dictionary + + err error + vars string + data string + + macroArg int // TODO: allow more than one argument +} + +// NewDecoder returns a new Decoder. +// +// Decoders are designed to be reused for multiple invocations of Execute. +// Only one goroutine may call Execute concurrently. 
+func NewDecoder(tag language.Tag, r Renderer, macros Dictionary) *Decoder { + return &Decoder{ + tag: tag, + dst: r, + macros: macros, + } +} + +func (d *Decoder) setError(err error) { + if d.err == nil { + d.err = err + } +} + +// Language returns the language in which the message is being rendered. +// +// The destination language may be a child language of the language used for +// encoding. For instance, a decoding language of "pt-PT" is consistent with an +// encoding language of "pt". +func (d *Decoder) Language() language.Tag { return d.tag } + +// Done reports whether there are more bytes to process in this message. +func (d *Decoder) Done() bool { return len(d.data) == 0 } + +// Render implements Renderer. +func (d *Decoder) Render(s string) { d.dst.Render(s) } + +// Arg implements Renderer. +// +// During evaluation of macros, the argument positions may be mapped to +// arguments that differ from the original call. +func (d *Decoder) Arg(i int) interface{} { + if d.macroArg != 0 { + if i != 1 { + panic("catmsg: only macros with single argument supported") + } + i = d.macroArg + } + return d.dst.Arg(i) +} + +// DecodeUint decodes a number that was encoded with EncodeUint and advances the +// position. +func (d *Decoder) DecodeUint() uint64 { + x, n, err := decodeUintString(d.data) + d.data = d.data[n:] + if err != nil { + d.setError(err) + } + return x +} + +// DecodeString decodes a string that was encoded with EncodeString and advances +// the position. +func (d *Decoder) DecodeString() string { + size := d.DecodeUint() + s := d.data[:size] + d.data = d.data[size:] + return s +} + +// SkipMessage skips the message at the current location and advances the +// position. +func (d *Decoder) SkipMessage() { + n := int(d.DecodeUint()) + d.data = d.data[n:] +} + +// Execute decodes and evaluates msg. +// +// Only one goroutine may call execute. 
+func (d *Decoder) Execute(msg string) error { + d.err = nil + if !d.execute(msg) { + return ErrNoMatch + } + return d.err +} + +func (d *Decoder) execute(msg string) bool { + saved := d.data + d.data = msg + ok := d.executeMessage() + d.data = saved + return ok +} + +// executeMessageFromData is like execute, but also decodes a leading message +// size and clips the given string accordingly. +// +// It reports the number of bytes consumed and whether a message was selected. +func (d *Decoder) executeMessageFromData(s string) (n int, ok bool) { + saved := d.data + d.data = s + size := int(d.DecodeUint()) + n = len(s) - len(d.data) + // Sanitize the setting. This allows skipping a size argument for + // RawString and method Done. + d.data = d.data[:size] + ok = d.executeMessage() + n += size - len(d.data) + d.data = saved + return n, ok +} + +var errUnknownHandler = errors.New("catmsg: string contains unsupported handler") + +// executeMessage reads the handle id, initializes the decoder and executes the +// message. It is assumed that all of d.data[d.p:] is the single message. +func (d *Decoder) executeMessage() bool { + if d.Done() { + // We interpret no data as a valid empty message. + return true + } + handle := d.DecodeUint() + + var fn Handler + mutex.Lock() + if int(handle) < len(handlers) { + fn = handlers[handle] + } + mutex.Unlock() + if fn == nil { + d.setError(errUnknownHandler) + d.execute(fmt.Sprintf("\x02$!(UNKNOWNMSGHANDLER=%#x)", handle)) + return true + } + return fn(d) +} + +// ExecuteMessage decodes and executes the message at the current position. +func (d *Decoder) ExecuteMessage() bool { + n, ok := d.executeMessageFromData(d.data) + d.data = d.data[n:] + return ok +} + +// ExecuteSubstitution executes the message corresponding to the substitution +// as encoded by EncodeSubstitution. 
+func (d *Decoder) ExecuteSubstitution() { + switch x := d.DecodeUint(); x { + case substituteVar: + offset := d.DecodeUint() + d.executeMessageFromData(d.vars[offset:]) + case substituteMacro: + name := d.DecodeString() + data, ok := d.macros.Lookup(name) + old := d.macroArg + // TODO: support macros of arity other than 1. + d.macroArg = int(d.DecodeUint()) + switch { + case !ok: + // TODO: detect this at creation time. + d.setError(fmt.Errorf("catmsg: undefined macro %q", name)) + fallthrough + case !d.execute(data): + d.dst.Render(name) // fall back to macro name. + } + d.macroArg = old + case substituteError: + d.dst.Render(d.DecodeString()) + default: + panic("catmsg: unreachable") + } +} diff --git a/vendor/golang.org/x/text/internal/catmsg/varint.go b/vendor/golang.org/x/text/internal/catmsg/varint.go new file mode 100644 index 00000000..a2cee2cf --- /dev/null +++ b/vendor/golang.org/x/text/internal/catmsg/varint.go @@ -0,0 +1,62 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catmsg + +// This file implements varint encoding analogous to the one in encoding/binary. +// We need a string version of this function, so we add that here and then add +// the rest for consistency. + +import "errors" + +var ( + errIllegalVarint = errors.New("catmsg: illegal varint") + errVarintTooLarge = errors.New("catmsg: varint too large for uint64") +) + +const maxVarintBytes = 10 // maximum length of a varint + +// encodeUint encodes x as a variable-sized integer into buf and returns the +// number of bytes written. 
buf must be at least maxVarintBytes long +func encodeUint(buf []byte, x uint64) (n int) { + for ; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return n +} + +func decodeUintString(s string) (x uint64, size int, err error) { + i := 0 + for shift := uint(0); shift < 64; shift += 7 { + if i >= len(s) { + return 0, i, errIllegalVarint + } + b := uint64(s[i]) + i++ + x |= (b & 0x7F) << shift + if b&0x80 == 0 { + return x, i, nil + } + } + return 0, i, errVarintTooLarge +} + +func decodeUint(b []byte) (x uint64, size int, err error) { + i := 0 + for shift := uint(0); shift < 64; shift += 7 { + if i >= len(b) { + return 0, i, errIllegalVarint + } + c := uint64(b[i]) + i++ + x |= (c & 0x7F) << shift + if c&0x80 == 0 { + return x, i, nil + } + } + return 0, i, errVarintTooLarge +} diff --git a/vendor/golang.org/x/text/internal/format/format.go b/vendor/golang.org/x/text/internal/format/format.go new file mode 100644 index 00000000..ee1c57a3 --- /dev/null +++ b/vendor/golang.org/x/text/internal/format/format.go @@ -0,0 +1,41 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package format contains types for defining language-specific formatting of +// values. +// +// This package is internal now, but will eventually be exposed after the API +// settles. +package format // import "golang.org/x/text/internal/format" + +import ( + "fmt" + + "golang.org/x/text/language" +) + +// State represents the printer state passed to custom formatters. It provides +// access to the fmt.State interface and the sentence and language-related +// context. +type State interface { + fmt.State + + // Language reports the requested language in which to render a message. + Language() language.Tag + + // TODO: consider this and removing rune from the Format method in the + // Formatter interface. 
+ // + // Verb returns the format variant to render, analogous to the types used + // in fmt. Use 'v' for the default or only variant. + // Verb() rune + + // TODO: more info: + // - sentence context such as linguistic features passed by the translator. +} + +// Formatter is analogous to fmt.Formatter. +type Formatter interface { + Format(state State, verb rune) +} diff --git a/vendor/golang.org/x/text/internal/format/parser.go b/vendor/golang.org/x/text/internal/format/parser.go new file mode 100644 index 00000000..855aed71 --- /dev/null +++ b/vendor/golang.org/x/text/internal/format/parser.go @@ -0,0 +1,358 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package format + +import ( + "reflect" + "unicode/utf8" +) + +// A Parser parses a format string. The result from the parse are set in the +// struct fields. +type Parser struct { + Verb rune + + WidthPresent bool + PrecPresent bool + Minus bool + Plus bool + Sharp bool + Space bool + Zero bool + + // For the formats %+v %#v, we set the plusV/sharpV flags + // and clear the plus/sharp flags since %+v and %#v are in effect + // different, flagless formats set at the top level. + PlusV bool + SharpV bool + + HasIndex bool + + Width int + Prec int // precision + + // retain arguments across calls. + Args []interface{} + // retain current argument number across calls + ArgNum int + + // reordered records whether the format string used argument reordering. + Reordered bool + // goodArgNum records whether the most recent reordering directive was valid. + goodArgNum bool + + // position info + format string + startPos int + endPos int + Status Status +} + +// Reset initializes a parser to scan format strings for the given args. 
+func (p *Parser) Reset(args []interface{}) { + p.Args = args + p.ArgNum = 0 + p.startPos = 0 + p.Reordered = false +} + +// Text returns the part of the format string that was parsed by the last call +// to Scan. It returns the original substitution clause if the current scan +// parsed a substitution. +func (p *Parser) Text() string { return p.format[p.startPos:p.endPos] } + +// SetFormat sets a new format string to parse. It does not reset the argument +// count. +func (p *Parser) SetFormat(format string) { + p.format = format + p.startPos = 0 + p.endPos = 0 +} + +// Status indicates the result type of a call to Scan. +type Status int + +const ( + StatusText Status = iota + StatusSubstitution + StatusBadWidthSubstitution + StatusBadPrecSubstitution + StatusNoVerb + StatusBadArgNum + StatusMissingArg +) + +// ClearFlags reset the parser to default behavior. +func (p *Parser) ClearFlags() { + p.WidthPresent = false + p.PrecPresent = false + p.Minus = false + p.Plus = false + p.Sharp = false + p.Space = false + p.Zero = false + + p.PlusV = false + p.SharpV = false + + p.HasIndex = false +} + +// Scan scans the next part of the format string and sets the status to +// indicate whether it scanned a string literal, substitution or error. +func (p *Parser) Scan() bool { + p.Status = StatusText + format := p.format + end := len(format) + if p.endPos >= end { + return false + } + afterIndex := false // previous item in format was an index like [3]. + + p.startPos = p.endPos + p.goodArgNum = true + i := p.startPos + for i < end && format[i] != '%' { + i++ + } + if i > p.startPos { + p.endPos = i + return true + } + // Process one verb + i++ + + p.Status = StatusSubstitution + + // Do we have flags? + p.ClearFlags() + +simpleFormat: + for ; i < end; i++ { + c := p.format[i] + switch c { + case '#': + p.Sharp = true + case '0': + p.Zero = !p.Minus // Only allow zero padding to the left. 
+ case '+': + p.Plus = true + case '-': + p.Minus = true + p.Zero = false // Do not pad with zeros to the right. + case ' ': + p.Space = true + default: + // Fast path for common case of ascii lower case simple verbs + // without precision or width or argument indices. + if 'a' <= c && c <= 'z' && p.ArgNum < len(p.Args) { + if c == 'v' { + // Go syntax + p.SharpV = p.Sharp + p.Sharp = false + // Struct-field syntax + p.PlusV = p.Plus + p.Plus = false + } + p.Verb = rune(c) + p.ArgNum++ + p.endPos = i + 1 + return true + } + // Format is more complex than simple flags and a verb or is malformed. + break simpleFormat + } + } + + // Do we have an explicit argument index? + i, afterIndex = p.updateArgNumber(format, i) + + // Do we have width? + if i < end && format[i] == '*' { + i++ + p.Width, p.WidthPresent = p.intFromArg() + + if !p.WidthPresent { + p.Status = StatusBadWidthSubstitution + } + + // We have a negative width, so take its value and ensure + // that the minus flag is set + if p.Width < 0 { + p.Width = -p.Width + p.Minus = true + p.Zero = false // Do not pad with zeros to the right. + } + afterIndex = false + } else { + p.Width, p.WidthPresent, i = parsenum(format, i, end) + if afterIndex && p.WidthPresent { // "%[3]2d" + p.goodArgNum = false + } + } + + // Do we have precision? + if i+1 < end && format[i] == '.' 
{ + i++ + if afterIndex { // "%[3].2d" + p.goodArgNum = false + } + i, afterIndex = p.updateArgNumber(format, i) + if i < end && format[i] == '*' { + i++ + p.Prec, p.PrecPresent = p.intFromArg() + // Negative precision arguments don't make sense + if p.Prec < 0 { + p.Prec = 0 + p.PrecPresent = false + } + if !p.PrecPresent { + p.Status = StatusBadPrecSubstitution + } + afterIndex = false + } else { + p.Prec, p.PrecPresent, i = parsenum(format, i, end) + if !p.PrecPresent { + p.Prec = 0 + p.PrecPresent = true + } + } + } + + if !afterIndex { + i, afterIndex = p.updateArgNumber(format, i) + } + p.HasIndex = afterIndex + + if i >= end { + p.endPos = i + p.Status = StatusNoVerb + return true + } + + verb, w := utf8.DecodeRuneInString(format[i:]) + p.endPos = i + w + p.Verb = verb + + switch { + case verb == '%': // Percent does not absorb operands and ignores f.wid and f.prec. + p.startPos = p.endPos - 1 + p.Status = StatusText + case !p.goodArgNum: + p.Status = StatusBadArgNum + case p.ArgNum >= len(p.Args): // No argument left over to print for the current verb. + p.Status = StatusMissingArg + p.ArgNum++ + case verb == 'v': + // Go syntax + p.SharpV = p.Sharp + p.Sharp = false + // Struct-field syntax + p.PlusV = p.Plus + p.Plus = false + fallthrough + default: + p.ArgNum++ + } + return true +} + +// intFromArg gets the ArgNumth element of Args. On return, isInt reports +// whether the argument has integer type. +func (p *Parser) intFromArg() (num int, isInt bool) { + if p.ArgNum < len(p.Args) { + arg := p.Args[p.ArgNum] + num, isInt = arg.(int) // Almost always OK. + if !isInt { + // Work harder. 
+ switch v := reflect.ValueOf(arg); v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + n := v.Int() + if int64(int(n)) == n { + num = int(n) + isInt = true + } + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + n := v.Uint() + if int64(n) >= 0 && uint64(int(n)) == n { + num = int(n) + isInt = true + } + default: + // Already 0, false. + } + } + p.ArgNum++ + if tooLarge(num) { + num = 0 + isInt = false + } + } + return +} + +// parseArgNumber returns the value of the bracketed number, minus 1 +// (explicit argument numbers are one-indexed but we want zero-indexed). +// The opening bracket is known to be present at format[0]. +// The returned values are the index, the number of bytes to consume +// up to the closing paren, if present, and whether the number parsed +// ok. The bytes to consume will be 1 if no closing paren is present. +func parseArgNumber(format string) (index int, wid int, ok bool) { + // There must be at least 3 bytes: [n]. + if len(format) < 3 { + return 0, 1, false + } + + // Find closing bracket. + for i := 1; i < len(format); i++ { + if format[i] == ']' { + width, ok, newi := parsenum(format, 1, i) + if !ok || newi != i { + return 0, i + 1, false + } + return width - 1, i + 1, true // arg numbers are one-indexed and skip paren. + } + } + return 0, 1, false +} + +// updateArgNumber returns the next argument to evaluate, which is either the value of the passed-in +// argNum or the value of the bracketed integer that begins format[i:]. It also returns +// the new value of i, that is, the index of the next byte of the format to process. 
+func (p *Parser) updateArgNumber(format string, i int) (newi int, found bool) { + if len(format) <= i || format[i] != '[' { + return i, false + } + p.Reordered = true + index, wid, ok := parseArgNumber(format[i:]) + if ok && 0 <= index && index < len(p.Args) { + p.ArgNum = index + return i + wid, true + } + p.goodArgNum = false + return i + wid, ok +} + +// tooLarge reports whether the magnitude of the integer is +// too large to be used as a formatting width or precision. +func tooLarge(x int) bool { + const max int = 1e6 + return x > max || x < -max +} + +// parsenum converts ASCII to integer. num is 0 (and isnum is false) if no number present. +func parsenum(s string, start, end int) (num int, isnum bool, newi int) { + if start >= end { + return 0, false, end + } + for newi = start; newi < end && '0' <= s[newi] && s[newi] <= '9'; newi++ { + if tooLarge(num) { + return 0, false, end // Overflow; crazy long number most likely. + } + num = num*10 + int(s[newi]-'0') + isnum = true + } + return +} diff --git a/vendor/golang.org/x/text/internal/number/common.go b/vendor/golang.org/x/text/internal/number/common.go new file mode 100644 index 00000000..a6e9c8e0 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/common.go @@ -0,0 +1,55 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. + +package number + +import ( + "unicode/utf8" + + "golang.org/x/text/internal/language/compact" +) + +// A system identifies a CLDR numbering system. +type system byte + +type systemData struct { + id system + digitSize byte // number of UTF-8 bytes per digit + zero [utf8.UTFMax]byte // UTF-8 sequence of zero digit. +} + +// A SymbolType identifies a symbol of a specific kind. 
+type SymbolType int + +const ( + SymDecimal SymbolType = iota + SymGroup + SymList + SymPercentSign + SymPlusSign + SymMinusSign + SymExponential + SymSuperscriptingExponent + SymPerMille + SymInfinity + SymNan + SymTimeSeparator + + NumSymbolTypes +) + +const hasNonLatnMask = 0x8000 + +// symOffset is an offset into altSymData if the bit indicated by hasNonLatnMask +// is not 0 (with this bit masked out), and an offset into symIndex otherwise. +// +// TODO: this type can be a byte again if we use an indirection into altsymData +// and introduce an alt -> offset slice (the length of this will be number of +// alternatives plus 1). This also allows getting rid of the compactTag field +// in altSymData. In total this will save about 1K. +type symOffset uint16 + +type altSymData struct { + compactTag compact.ID + symIndex symOffset + system system +} diff --git a/vendor/golang.org/x/text/internal/number/decimal.go b/vendor/golang.org/x/text/internal/number/decimal.go new file mode 100644 index 00000000..e128cf34 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/decimal.go @@ -0,0 +1,500 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate stringer -type RoundingMode + +package number + +import ( + "math" + "strconv" +) + +// RoundingMode determines how a number is rounded to the desired precision. +type RoundingMode byte + +const ( + ToNearestEven RoundingMode = iota // towards the nearest integer, or towards an even number if equidistant. + ToNearestZero // towards the nearest integer, or towards zero if equidistant. + ToNearestAway // towards the nearest integer, or away from zero if equidistant. 
+ ToPositiveInf // towards infinity + ToNegativeInf // towards negative infinity + ToZero // towards zero + AwayFromZero // away from zero + numModes +) + +const maxIntDigits = 20 + +// A Decimal represents a floating point number in decimal format. +// Digits represents a number [0, 1.0), and the absolute value represented by +// Decimal is Digits * 10^Exp. Leading and trailing zeros may be omitted and Exp +// may point outside a valid position in Digits. +// +// Examples: +// +// Number Decimal +// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 +// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 +// 12000 Digits: [1, 2], Exp: 5 +// 12000.00 Digits: [1, 2], Exp: 5 +// 0.00123 Digits: [1, 2, 3], Exp: -2 +// 0 Digits: [], Exp: 0 +type Decimal struct { + digits + + buf [maxIntDigits]byte +} + +type digits struct { + Digits []byte // mantissa digits, big-endian + Exp int32 // exponent + Neg bool + Inf bool // Takes precedence over Digits and Exp. + NaN bool // Takes precedence over Inf. +} + +// Digits represents a floating point number represented in digits of the +// base in which a number is to be displayed. It is similar to Decimal, but +// keeps track of trailing fraction zeros and the comma placement for +// engineering notation. Digits must have at least one digit. 
+// +// Examples: +// +// Number Decimal +// decimal +// 12345 Digits: [1, 2, 3, 4, 5], Exp: 5 End: 5 +// 12.345 Digits: [1, 2, 3, 4, 5], Exp: 2 End: 5 +// 12000 Digits: [1, 2], Exp: 5 End: 5 +// 12000.00 Digits: [1, 2], Exp: 5 End: 7 +// 0.00123 Digits: [1, 2, 3], Exp: -2 End: 3 +// 0 Digits: [], Exp: 0 End: 1 +// scientific (actual exp is Exp - Comma) +// 0e0 Digits: [0], Exp: 1, End: 1, Comma: 1 +// .0e0 Digits: [0], Exp: 0, End: 1, Comma: 0 +// 0.0e0 Digits: [0], Exp: 1, End: 2, Comma: 1 +// 1.23e4 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 1 +// .123e5 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 0 +// engineering +// 12.3e3 Digits: [1, 2, 3], Exp: 5, End: 3, Comma: 2 +type Digits struct { + digits + // End indicates the end position of the number. + End int32 // For decimals Exp <= End. For scientific len(Digits) <= End. + // Comma is used for the comma position for scientific (always 0 or 1) and + // engineering notation (always 0, 1, 2, or 3). + Comma uint8 + // IsScientific indicates whether this number is to be rendered as a + // scientific number. + IsScientific bool +} + +func (d *Digits) NumFracDigits() int { + if d.Exp >= d.End { + return 0 + } + return int(d.End - d.Exp) +} + +// normalize returns a new Decimal with leading and trailing zeros removed. +func (d *Decimal) normalize() (n Decimal) { + n = *d + b := n.Digits + // Strip leading zeros. Resulting number of digits is significant digits. + for len(b) > 0 && b[0] == 0 { + b = b[1:] + n.Exp-- + } + // Strip trailing zeros + for len(b) > 0 && b[len(b)-1] == 0 { + b = b[:len(b)-1] + } + if len(b) == 0 { + n.Exp = 0 + } + n.Digits = b + return n +} + +func (d *Decimal) clear() { + b := d.Digits + if b == nil { + b = d.buf[:0] + } + *d = Decimal{} + d.Digits = b[:0] +} + +func (x *Decimal) String() string { + if x.NaN { + return "NaN" + } + var buf []byte + if x.Neg { + buf = append(buf, '-') + } + if x.Inf { + buf = append(buf, "Inf"...) 
+ return string(buf) + } + switch { + case len(x.Digits) == 0: + buf = append(buf, '0') + case x.Exp <= 0: + // 0.00ddd + buf = append(buf, "0."...) + buf = appendZeros(buf, -int(x.Exp)) + buf = appendDigits(buf, x.Digits) + + case /* 0 < */ int(x.Exp) < len(x.Digits): + // dd.ddd + buf = appendDigits(buf, x.Digits[:x.Exp]) + buf = append(buf, '.') + buf = appendDigits(buf, x.Digits[x.Exp:]) + + default: // len(x.Digits) <= x.Exp + // ddd00 + buf = appendDigits(buf, x.Digits) + buf = appendZeros(buf, int(x.Exp)-len(x.Digits)) + } + return string(buf) +} + +func appendDigits(buf []byte, digits []byte) []byte { + for _, c := range digits { + buf = append(buf, c+'0') + } + return buf +} + +// appendZeros appends n 0 digits to buf and returns buf. +func appendZeros(buf []byte, n int) []byte { + for ; n > 0; n-- { + buf = append(buf, '0') + } + return buf +} + +func (d *digits) round(mode RoundingMode, n int) { + if n >= len(d.Digits) { + return + } + // Make rounding decision: The result mantissa is truncated ("rounded down") + // by default. Decide if we need to increment, or "round up", the (unsigned) + // mantissa. + inc := false + switch mode { + case ToNegativeInf: + inc = d.Neg + case ToPositiveInf: + inc = !d.Neg + case ToZero: + // nothing to do + case AwayFromZero: + inc = true + case ToNearestEven: + inc = d.Digits[n] > 5 || d.Digits[n] == 5 && + (len(d.Digits) > n+1 || n == 0 || d.Digits[n-1]&1 != 0) + case ToNearestAway: + inc = d.Digits[n] >= 5 + case ToNearestZero: + inc = d.Digits[n] > 5 || d.Digits[n] == 5 && len(d.Digits) > n+1 + default: + panic("unreachable") + } + if inc { + d.roundUp(n) + } else { + d.roundDown(n) + } +} + +// roundFloat rounds a floating point number. +func (r RoundingMode) roundFloat(x float64) float64 { + // Make rounding decision: The result mantissa is truncated ("rounded down") + // by default. Decide if we need to increment, or "round up", the (unsigned) + // mantissa. 
+ abs := x + if x < 0 { + abs = -x + } + i, f := math.Modf(abs) + if f == 0.0 { + return x + } + inc := false + switch r { + case ToNegativeInf: + inc = x < 0 + case ToPositiveInf: + inc = x >= 0 + case ToZero: + // nothing to do + case AwayFromZero: + inc = true + case ToNearestEven: + // TODO: check overflow + inc = f > 0.5 || f == 0.5 && int64(i)&1 != 0 + case ToNearestAway: + inc = f >= 0.5 + case ToNearestZero: + inc = f > 0.5 + default: + panic("unreachable") + } + if inc { + i += 1 + } + if abs != x { + i = -i + } + return i +} + +func (x *digits) roundUp(n int) { + if n < 0 || n >= len(x.Digits) { + return // nothing to do + } + // find first digit < 9 + for n > 0 && x.Digits[n-1] >= 9 { + n-- + } + + if n == 0 { + // all digits are 9s => round up to 1 and update exponent + x.Digits[0] = 1 // ok since len(x.Digits) > n + x.Digits = x.Digits[:1] + x.Exp++ + return + } + x.Digits[n-1]++ + x.Digits = x.Digits[:n] + // x already trimmed +} + +func (x *digits) roundDown(n int) { + if n < 0 || n >= len(x.Digits) { + return // nothing to do + } + x.Digits = x.Digits[:n] + trim(x) +} + +// trim cuts off any trailing zeros from x's mantissa; +// they are meaningless for the value of x. +func trim(x *digits) { + i := len(x.Digits) + for i > 0 && x.Digits[i-1] == 0 { + i-- + } + x.Digits = x.Digits[:i] + if i == 0 { + x.Exp = 0 + } +} + +// A Converter converts a number into decimals according to the given rounding +// criteria. +type Converter interface { + Convert(d *Decimal, r RoundingContext) +} + +const ( + signed = true + unsigned = false +) + +// Convert converts the given number to the decimal representation using the +// supplied RoundingContext. 
+func (d *Decimal) Convert(r RoundingContext, number interface{}) { + switch f := number.(type) { + case Converter: + d.clear() + f.Convert(d, r) + case float32: + d.ConvertFloat(r, float64(f), 32) + case float64: + d.ConvertFloat(r, f, 64) + case int: + d.ConvertInt(r, signed, uint64(f)) + case int8: + d.ConvertInt(r, signed, uint64(f)) + case int16: + d.ConvertInt(r, signed, uint64(f)) + case int32: + d.ConvertInt(r, signed, uint64(f)) + case int64: + d.ConvertInt(r, signed, uint64(f)) + case uint: + d.ConvertInt(r, unsigned, uint64(f)) + case uint8: + d.ConvertInt(r, unsigned, uint64(f)) + case uint16: + d.ConvertInt(r, unsigned, uint64(f)) + case uint32: + d.ConvertInt(r, unsigned, uint64(f)) + case uint64: + d.ConvertInt(r, unsigned, f) + + default: + d.NaN = true + // TODO: + // case string: if produced by strconv, allows for easy arbitrary pos. + // case reflect.Value: + // case big.Float + // case big.Int + // case big.Rat? + // catch underlyings using reflect or will this already be done by the + // message package? + } +} + +// ConvertInt converts an integer to decimals. +func (d *Decimal) ConvertInt(r RoundingContext, signed bool, x uint64) { + if r.Increment > 0 { + // TODO: if uint64 is too large, fall back to float64 + if signed { + d.ConvertFloat(r, float64(int64(x)), 64) + } else { + d.ConvertFloat(r, float64(x), 64) + } + return + } + d.clear() + if signed && int64(x) < 0 { + x = uint64(-int64(x)) + d.Neg = true + } + d.fillIntDigits(x) + d.Exp = int32(len(d.Digits)) +} + +// ConvertFloat converts a floating point number to decimals. 
+func (d *Decimal) ConvertFloat(r RoundingContext, x float64, size int) { + d.clear() + if math.IsNaN(x) { + d.NaN = true + return + } + // Simple case: decimal notation + if r.Increment > 0 { + scale := int(r.IncrementScale) + mult := 1.0 + if scale >= len(scales) { + mult = math.Pow(10, float64(scale)) + } else { + mult = scales[scale] + } + // We multiply x instead of dividing inc as it gives less rounding + // issues. + x *= mult + x /= float64(r.Increment) + x = r.Mode.roundFloat(x) + x *= float64(r.Increment) + x /= mult + } + + abs := x + if x < 0 { + d.Neg = true + abs = -x + } + if math.IsInf(abs, 1) { + d.Inf = true + return + } + + // By default we get the exact decimal representation. + verb := byte('g') + prec := -1 + // As the strconv API does not return the rounding accuracy, we can only + // round using ToNearestEven. + if r.Mode == ToNearestEven { + if n := r.RoundSignificantDigits(); n >= 0 { + prec = n + } else if n = r.RoundFractionDigits(); n >= 0 { + prec = n + verb = 'f' + } + } else { + // TODO: At this point strconv's rounding is imprecise to the point that + // it is not usable for this purpose. + // See https://github.com/golang/go/issues/21714 + // If rounding is requested, we ask for a large number of digits and + // round from there to simulate rounding only once. + // Ideally we would have strconv export an AppendDigits that would take + // a rounding mode and/or return an accuracy. Something like this would + // work: + // AppendDigits(dst []byte, x float64, base, size, prec int) (digits []byte, exp, accuracy int) + hasPrec := r.RoundSignificantDigits() >= 0 + hasScale := r.RoundFractionDigits() >= 0 + if hasPrec || hasScale { + // prec is the number of mantissa bits plus some extra for safety. + // We need at least the number of mantissa bits as decimals to + // accurately represent the floating point without rounding, as each + // bit requires one more decimal to represent: 0.5, 0.25, 0.125, ... 
+ prec = 60 + } + } + + b := strconv.AppendFloat(d.Digits[:0], abs, verb, prec, size) + i := 0 + k := 0 + beforeDot := 1 + for i < len(b) { + if c := b[i]; '0' <= c && c <= '9' { + b[k] = c - '0' + k++ + d.Exp += int32(beforeDot) + } else if c == '.' { + beforeDot = 0 + d.Exp = int32(k) + } else { + break + } + i++ + } + d.Digits = b[:k] + if i != len(b) { + i += len("e") + pSign := i + exp := 0 + for i++; i < len(b); i++ { + exp *= 10 + exp += int(b[i] - '0') + } + if b[pSign] == '-' { + exp = -exp + } + d.Exp = int32(exp) + 1 + } +} + +func (d *Decimal) fillIntDigits(x uint64) { + if cap(d.Digits) < maxIntDigits { + d.Digits = d.buf[:] + } else { + d.Digits = d.buf[:maxIntDigits] + } + i := 0 + for ; x > 0; x /= 10 { + d.Digits[i] = byte(x % 10) + i++ + } + d.Digits = d.Digits[:i] + for p := 0; p < i; p++ { + i-- + d.Digits[p], d.Digits[i] = d.Digits[i], d.Digits[p] + } +} + +var scales [70]float64 + +func init() { + x := 1.0 + for i := range scales { + scales[i] = x + x *= 10 + } +} diff --git a/vendor/golang.org/x/text/internal/number/format.go b/vendor/golang.org/x/text/internal/number/format.go new file mode 100644 index 00000000..1aadcf40 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/format.go @@ -0,0 +1,533 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package number + +import ( + "strconv" + "unicode/utf8" + + "golang.org/x/text/language" +) + +// TODO: +// - grouping of fractions +// - allow user-defined superscript notation (such as 4) +// - same for non-breaking spaces, like   + +// A VisibleDigits computes digits, comma placement and trailing zeros as they +// will be shown to the user. +type VisibleDigits interface { + Digits(buf []byte, t language.Tag, scale int) Digits + // TODO: Do we also need to add the verb or pass a format.State? 
+} + +// Formatting proceeds along the following lines: +// 0) Compose rounding information from format and context. +// 1) Convert a number into a Decimal. +// 2) Sanitize Decimal by adding trailing zeros, removing leading digits, and +// (non-increment) rounding. The Decimal that results from this is suitable +// for determining the plural form. +// 3) Render the Decimal in the localized form. + +// Formatter contains all the information needed to render a number. +type Formatter struct { + Pattern + Info +} + +func (f *Formatter) init(t language.Tag, index []uint8) { + f.Info = InfoFromTag(t) + f.Pattern = formats[index[tagToID(t)]] +} + +// InitPattern initializes a Formatter for the given Pattern. +func (f *Formatter) InitPattern(t language.Tag, pat *Pattern) { + f.Info = InfoFromTag(t) + f.Pattern = *pat +} + +// InitDecimal initializes a Formatter using the default Pattern for the given +// language. +func (f *Formatter) InitDecimal(t language.Tag) { + f.init(t, tagToDecimal) +} + +// InitScientific initializes a Formatter using the default Pattern for the +// given language. +func (f *Formatter) InitScientific(t language.Tag) { + f.init(t, tagToScientific) + f.Pattern.MinFractionDigits = 0 + f.Pattern.MaxFractionDigits = -1 +} + +// InitEngineering initializes a Formatter using the default Pattern for the +// given language. +func (f *Formatter) InitEngineering(t language.Tag) { + f.init(t, tagToScientific) + f.Pattern.MinFractionDigits = 0 + f.Pattern.MaxFractionDigits = -1 + f.Pattern.MaxIntegerDigits = 3 + f.Pattern.MinIntegerDigits = 1 +} + +// InitPercent initializes a Formatter using the default Pattern for the given +// language. +func (f *Formatter) InitPercent(t language.Tag) { + f.init(t, tagToPercent) +} + +// InitPerMille initializes a Formatter using the default Pattern for the given +// language. 
+func (f *Formatter) InitPerMille(t language.Tag) { + f.init(t, tagToPercent) + f.Pattern.DigitShift = 3 +} + +func (f *Formatter) Append(dst []byte, x interface{}) []byte { + var d Decimal + r := f.RoundingContext + d.Convert(r, x) + return f.Render(dst, FormatDigits(&d, r)) +} + +func FormatDigits(d *Decimal, r RoundingContext) Digits { + if r.isScientific() { + return scientificVisibleDigits(r, d) + } + return decimalVisibleDigits(r, d) +} + +func (f *Formatter) Format(dst []byte, d *Decimal) []byte { + return f.Render(dst, FormatDigits(d, f.RoundingContext)) +} + +func (f *Formatter) Render(dst []byte, d Digits) []byte { + var result []byte + var postPrefix, preSuffix int + if d.IsScientific { + result, postPrefix, preSuffix = appendScientific(dst, f, &d) + } else { + result, postPrefix, preSuffix = appendDecimal(dst, f, &d) + } + if f.PadRune == 0 { + return result + } + width := int(f.FormatWidth) + if count := utf8.RuneCount(result); count < width { + insertPos := 0 + switch f.Flags & PadMask { + case PadAfterPrefix: + insertPos = postPrefix + case PadBeforeSuffix: + insertPos = preSuffix + case PadAfterSuffix: + insertPos = len(result) + } + num := width - count + pad := [utf8.UTFMax]byte{' '} + sz := 1 + if r := f.PadRune; r != 0 { + sz = utf8.EncodeRune(pad[:], r) + } + extra := sz * num + if n := len(result) + extra; n < cap(result) { + result = result[:n] + copy(result[insertPos+extra:], result[insertPos:]) + } else { + buf := make([]byte, n) + copy(buf, result[:insertPos]) + copy(buf[insertPos+extra:], result[insertPos:]) + result = buf + } + for ; num > 0; num-- { + insertPos += copy(result[insertPos:], pad[:sz]) + } + } + return result +} + +// decimalVisibleDigits converts d according to the RoundingContext. Note that +// the exponent may change as a result of this operation. 
+func decimalVisibleDigits(r RoundingContext, d *Decimal) Digits { + if d.NaN || d.Inf { + return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}} + } + n := Digits{digits: d.normalize().digits} + + exp := n.Exp + exp += int32(r.DigitShift) + + // Cap integer digits. Remove *most-significant* digits. + if r.MaxIntegerDigits > 0 { + if p := int(exp) - int(r.MaxIntegerDigits); p > 0 { + if p > len(n.Digits) { + p = len(n.Digits) + } + if n.Digits = n.Digits[p:]; len(n.Digits) == 0 { + exp = 0 + } else { + exp -= int32(p) + } + // Strip leading zeros. + for len(n.Digits) > 0 && n.Digits[0] == 0 { + n.Digits = n.Digits[1:] + exp-- + } + } + } + + // Rounding if not already done by Convert. + p := len(n.Digits) + if maxSig := int(r.MaxSignificantDigits); maxSig > 0 { + p = maxSig + } + if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 { + if cap := int(exp) + maxFrac; cap < p { + p = int(exp) + maxFrac + } + if p < 0 { + p = 0 + } + } + n.round(r.Mode, p) + + // set End (trailing zeros) + n.End = int32(len(n.Digits)) + if n.End == 0 { + exp = 0 + if r.MinFractionDigits > 0 { + n.End = int32(r.MinFractionDigits) + } + if p := int32(r.MinSignificantDigits) - 1; p > n.End { + n.End = p + } + } else { + if end := exp + int32(r.MinFractionDigits); end > n.End { + n.End = end + } + if n.End < int32(r.MinSignificantDigits) { + n.End = int32(r.MinSignificantDigits) + } + } + n.Exp = exp + return n +} + +// appendDecimal appends a formatted number to dst. It returns two possible +// insertion points for padding. +func appendDecimal(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) { + if dst, ok := f.renderSpecial(dst, n); ok { + return dst, 0, len(dst) + } + digits := n.Digits + exp := n.Exp + + // Split in integer and fraction part. 
+ var intDigits, fracDigits []byte + numInt := 0 + numFrac := int(n.End - n.Exp) + if exp > 0 { + numInt = int(exp) + if int(exp) >= len(digits) { // ddddd | ddddd00 + intDigits = digits + } else { // ddd.dd + intDigits = digits[:exp] + fracDigits = digits[exp:] + } + } else { + fracDigits = digits + } + + neg := n.Neg + affix, suffix := f.getAffixes(neg) + dst = appendAffix(dst, f, affix, neg) + savedLen := len(dst) + + minInt := int(f.MinIntegerDigits) + if minInt == 0 && f.MinSignificantDigits > 0 { + minInt = 1 + } + // add leading zeros + for i := minInt; i > numInt; i-- { + dst = f.AppendDigit(dst, 0) + if f.needsSep(i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + i := 0 + for ; i < len(intDigits); i++ { + dst = f.AppendDigit(dst, intDigits[i]) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + for ; i < numInt; i++ { + dst = f.AppendDigit(dst, 0) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + + if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 { + dst = append(dst, f.Symbol(SymDecimal)...) + } + // Add trailing zeros + i = 0 + for n := -int(n.Exp); i < n; i++ { + dst = f.AppendDigit(dst, 0) + } + for _, d := range fracDigits { + i++ + dst = f.AppendDigit(dst, d) + } + for ; i < numFrac; i++ { + dst = f.AppendDigit(dst, 0) + } + return appendAffix(dst, f, suffix, neg), savedLen, len(dst) +} + +func scientificVisibleDigits(r RoundingContext, d *Decimal) Digits { + if d.NaN || d.Inf { + return Digits{digits: digits{Neg: d.Neg, NaN: d.NaN, Inf: d.Inf}} + } + n := Digits{digits: d.normalize().digits, IsScientific: true} + + // Normalize to have at least one digit. This simplifies engineering + // notation. + if len(n.Digits) == 0 { + n.Digits = append(n.Digits, 0) + n.Exp = 1 + } + + // Significant digits are transformed by the parser for scientific notation + // and do not need to be handled here. 
+ maxInt, numInt := int(r.MaxIntegerDigits), int(r.MinIntegerDigits) + if numInt == 0 { + numInt = 1 + } + + // If a maximum number of integers is specified, the minimum must be 1 + // and the exponent is grouped by this number (e.g. for engineering) + if maxInt > numInt { + // Correct the exponent to reflect a single integer digit. + numInt = 1 + // engineering + // 0.01234 ([12345]e-1) -> 1.2345e-2 12.345e-3 + // 12345 ([12345]e+5) -> 1.2345e4 12.345e3 + d := int(n.Exp-1) % maxInt + if d < 0 { + d += maxInt + } + numInt += d + } + + p := len(n.Digits) + if maxSig := int(r.MaxSignificantDigits); maxSig > 0 { + p = maxSig + } + if maxFrac := int(r.MaxFractionDigits); maxFrac >= 0 && numInt+maxFrac < p { + p = numInt + maxFrac + } + n.round(r.Mode, p) + + n.Comma = uint8(numInt) + n.End = int32(len(n.Digits)) + if minSig := int32(r.MinFractionDigits) + int32(numInt); n.End < minSig { + n.End = minSig + } + return n +} + +// appendScientific appends a formatted number to dst. It returns two possible +// insertion points for padding. +func appendScientific(dst []byte, f *Formatter, n *Digits) (b []byte, postPre, preSuf int) { + if dst, ok := f.renderSpecial(dst, n); ok { + return dst, 0, 0 + } + digits := n.Digits + numInt := int(n.Comma) + numFrac := int(n.End) - int(n.Comma) + + var intDigits, fracDigits []byte + if numInt <= len(digits) { + intDigits = digits[:numInt] + fracDigits = digits[numInt:] + } else { + intDigits = digits + } + neg := n.Neg + affix, suffix := f.getAffixes(neg) + dst = appendAffix(dst, f, affix, neg) + savedLen := len(dst) + + i := 0 + for ; i < len(intDigits); i++ { + dst = f.AppendDigit(dst, intDigits[i]) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + for ; i < numInt; i++ { + dst = f.AppendDigit(dst, 0) + if f.needsSep(numInt - i) { + dst = append(dst, f.Symbol(SymGroup)...) + } + } + + if numFrac > 0 || f.Flags&AlwaysDecimalSeparator != 0 { + dst = append(dst, f.Symbol(SymDecimal)...) 
+ } + i = 0 + for ; i < len(fracDigits); i++ { + dst = f.AppendDigit(dst, fracDigits[i]) + } + for ; i < numFrac; i++ { + dst = f.AppendDigit(dst, 0) + } + + // exp + buf := [12]byte{} + // TODO: use exponential if superscripting is not available (no Latin + // numbers or no tags) and use exponential in all other cases. + exp := n.Exp - int32(n.Comma) + exponential := f.Symbol(SymExponential) + if exponential == "E" { + dst = append(dst, f.Symbol(SymSuperscriptingExponent)...) + dst = f.AppendDigit(dst, 1) + dst = f.AppendDigit(dst, 0) + switch { + case exp < 0: + dst = append(dst, superMinus...) + exp = -exp + case f.Flags&AlwaysExpSign != 0: + dst = append(dst, superPlus...) + } + b = strconv.AppendUint(buf[:0], uint64(exp), 10) + for i := len(b); i < int(f.MinExponentDigits); i++ { + dst = append(dst, superDigits[0]...) + } + for _, c := range b { + dst = append(dst, superDigits[c-'0']...) + } + } else { + dst = append(dst, exponential...) + switch { + case exp < 0: + dst = append(dst, f.Symbol(SymMinusSign)...) + exp = -exp + case f.Flags&AlwaysExpSign != 0: + dst = append(dst, f.Symbol(SymPlusSign)...) + } + b = strconv.AppendUint(buf[:0], uint64(exp), 10) + for i := len(b); i < int(f.MinExponentDigits); i++ { + dst = f.AppendDigit(dst, 0) + } + for _, c := range b { + dst = f.AppendDigit(dst, c-'0') + } + } + return appendAffix(dst, f, suffix, neg), savedLen, len(dst) +} + +const ( + superMinus = "\u207B" // SUPERSCRIPT HYPHEN-MINUS + superPlus = "\u207A" // SUPERSCRIPT PLUS SIGN +) + +var ( + // Note: the digits are not sequential!!! 
+ superDigits = []string{ + "\u2070", // SUPERSCRIPT DIGIT ZERO + "\u00B9", // SUPERSCRIPT DIGIT ONE + "\u00B2", // SUPERSCRIPT DIGIT TWO + "\u00B3", // SUPERSCRIPT DIGIT THREE + "\u2074", // SUPERSCRIPT DIGIT FOUR + "\u2075", // SUPERSCRIPT DIGIT FIVE + "\u2076", // SUPERSCRIPT DIGIT SIX + "\u2077", // SUPERSCRIPT DIGIT SEVEN + "\u2078", // SUPERSCRIPT DIGIT EIGHT + "\u2079", // SUPERSCRIPT DIGIT NINE + } +) + +func (f *Formatter) getAffixes(neg bool) (affix, suffix string) { + str := f.Affix + if str != "" { + if f.NegOffset > 0 { + if neg { + str = str[f.NegOffset:] + } else { + str = str[:f.NegOffset] + } + } + sufStart := 1 + str[0] + affix = str[1:sufStart] + suffix = str[sufStart+1:] + } + // TODO: introduce a NeedNeg sign to indicate if the left pattern already + // has a sign marked? + if f.NegOffset == 0 && (neg || f.Flags&AlwaysSign != 0) { + affix = "-" + affix + } + return affix, suffix +} + +func (f *Formatter) renderSpecial(dst []byte, d *Digits) (b []byte, ok bool) { + if d.NaN { + return fmtNaN(dst, f), true + } + if d.Inf { + return fmtInfinite(dst, f, d), true + } + return dst, false +} + +func fmtNaN(dst []byte, f *Formatter) []byte { + return append(dst, f.Symbol(SymNan)...) +} + +func fmtInfinite(dst []byte, f *Formatter, d *Digits) []byte { + affix, suffix := f.getAffixes(d.Neg) + dst = appendAffix(dst, f, affix, d.Neg) + dst = append(dst, f.Symbol(SymInfinity)...) + dst = appendAffix(dst, f, suffix, d.Neg) + return dst +} + +func appendAffix(dst []byte, f *Formatter, affix string, neg bool) []byte { + quoting := false + escaping := false + for _, r := range affix { + switch { + case escaping: + // escaping occurs both inside and outside of quotes + dst = append(dst, string(r)...) + escaping = false + case r == '\\': + escaping = true + case r == '\'': + quoting = !quoting + case quoting: + dst = append(dst, string(r)...) + case r == '%': + if f.DigitShift == 3 { + dst = append(dst, f.Symbol(SymPerMille)...) 
+ } else { + dst = append(dst, f.Symbol(SymPercentSign)...) + } + case r == '-' || r == '+': + if neg { + dst = append(dst, f.Symbol(SymMinusSign)...) + } else if f.Flags&ElideSign == 0 { + dst = append(dst, f.Symbol(SymPlusSign)...) + } else { + dst = append(dst, ' ') + } + default: + dst = append(dst, string(r)...) + } + } + return dst +} diff --git a/vendor/golang.org/x/text/internal/number/number.go b/vendor/golang.org/x/text/internal/number/number.go new file mode 100644 index 00000000..e1d933c3 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/number.go @@ -0,0 +1,152 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:generate go run gen.go gen_common.go + +// Package number contains tools and data for formatting numbers. +package number + +import ( + "unicode/utf8" + + "golang.org/x/text/internal/language/compact" + "golang.org/x/text/language" +) + +// Info holds number formatting configuration data. +type Info struct { + system systemData // numbering system information + symIndex symOffset // index to symbols +} + +// InfoFromLangID returns a Info for the given compact language identifier and +// numbering system identifier. If system is the empty string, the default +// numbering system will be taken for that language. +func InfoFromLangID(compactIndex compact.ID, numberSystem string) Info { + p := langToDefaults[compactIndex] + // Lookup the entry for the language. + pSymIndex := symOffset(0) // Default: Latin, default symbols + system, ok := systemMap[numberSystem] + if !ok { + // Take the value for the default numbering system. This is by far the + // most common case as an alternative numbering system is hardly used. + if p&hasNonLatnMask == 0 { // Latn digits. + pSymIndex = p + } else { // Non-Latn or multiple numbering systems. + // Take the first entry from the alternatives list. 
+ data := langToAlt[p&^hasNonLatnMask] + pSymIndex = data.symIndex + system = data.system + } + } else { + langIndex := compactIndex + ns := system + outerLoop: + for ; ; p = langToDefaults[langIndex] { + if p&hasNonLatnMask == 0 { + if ns == 0 { + // The index directly points to the symbol data. + pSymIndex = p + break + } + // Move to the parent and retry. + langIndex = langIndex.Parent() + } else { + // The index points to a list of symbol data indexes. + for _, e := range langToAlt[p&^hasNonLatnMask:] { + if e.compactTag != langIndex { + if langIndex == 0 { + // The CLDR root defines full symbol information for + // all numbering systems (even though mostly by + // means of aliases). Fall back to the default entry + // for Latn if there is no data for the numbering + // system of this language. + if ns == 0 { + break + } + // Fall back to Latin and start from the original + // language. See + // https://unicode.org/reports/tr35/#Locale_Inheritance. + ns = numLatn + langIndex = compactIndex + continue outerLoop + } + // Fall back to parent. + langIndex = langIndex.Parent() + } else if e.system == ns { + pSymIndex = e.symIndex + break outerLoop + } + } + } + } + } + if int(system) >= len(numSysData) { // algorithmic + // Will generate ASCII digits in case the user inadvertently calls + // WriteDigit or Digit on it. + d := numSysData[0] + d.id = system + return Info{ + system: d, + symIndex: pSymIndex, + } + } + return Info{ + system: numSysData[system], + symIndex: pSymIndex, + } +} + +// InfoFromTag returns a Info for the given language tag. +func InfoFromTag(t language.Tag) Info { + return InfoFromLangID(tagToID(t), t.TypeForKey("nu")) +} + +// IsDecimal reports if the numbering system can convert decimal to native +// symbols one-to-one. +func (n Info) IsDecimal() bool { + return int(n.system.id) < len(numSysData) +} + +// WriteDigit writes the UTF-8 sequence for n corresponding to the given ASCII +// digit to dst and reports the number of bytes written. 
dst must be large +// enough to hold the rune (can be up to utf8.UTFMax bytes). +func (n Info) WriteDigit(dst []byte, asciiDigit rune) int { + copy(dst, n.system.zero[:n.system.digitSize]) + dst[n.system.digitSize-1] += byte(asciiDigit - '0') + return int(n.system.digitSize) +} + +// AppendDigit appends the UTF-8 sequence for n corresponding to the given digit +// to dst and reports the number of bytes written. dst must be large enough to +// hold the rune (can be up to utf8.UTFMax bytes). +func (n Info) AppendDigit(dst []byte, digit byte) []byte { + dst = append(dst, n.system.zero[:n.system.digitSize]...) + dst[len(dst)-1] += digit + return dst +} + +// Digit returns the digit for the numbering system for the corresponding ASCII +// value. For example, ni.Digit('3') could return '三'. Note that the argument +// is the rune constant '3', which equals 51, not the integer constant 3. +func (n Info) Digit(asciiDigit rune) rune { + var x [utf8.UTFMax]byte + n.WriteDigit(x[:], asciiDigit) + r, _ := utf8.DecodeRune(x[:]) + return r +} + +// Symbol returns the string for the given symbol type. +func (n Info) Symbol(t SymbolType) string { + return symData.Elem(int(symIndex[n.symIndex][t])) +} + +func formatForLang(t language.Tag, index []byte) *Pattern { + return &formats[index[tagToID(t)]] +} + +func tagToID(t language.Tag) compact.ID { + id, _ := compact.RegionalID(compact.Tag(t)) + return id +} diff --git a/vendor/golang.org/x/text/internal/number/pattern.go b/vendor/golang.org/x/text/internal/number/pattern.go new file mode 100644 index 00000000..06e59559 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/pattern.go @@ -0,0 +1,485 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package number + +import ( + "errors" + "unicode/utf8" +) + +// This file contains a parser for the CLDR number patterns as described in +// https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns. +// +// The following BNF is derived from this standard. +// +// pattern := subpattern (';' subpattern)? +// subpattern := affix? number exponent? affix? +// number := decimal | sigDigits +// decimal := '#'* '0'* ('.' fraction)? | '#' | '0' +// fraction := '0'* '#'* +// sigDigits := '#'* '@' '@'* '#'* +// exponent := 'E' '+'? '0'* '0' +// padSpec := '*' \L +// +// Notes: +// - An affix pattern may contain any runes, but runes with special meaning +// should be escaped. +// - Sequences of digits, '#', and '@' in decimal and sigDigits may have +// interstitial commas. + +// TODO: replace special characters in affixes (-, +, ¤) with control codes. + +// Pattern holds information for formatting numbers. It is designed to hold +// information from CLDR number patterns. +// +// This pattern is precompiled for all patterns for all languages. Even though +// the number of patterns is not very large, we want to keep this small. +// +// This type is only intended for internal use. +type Pattern struct { + RoundingContext + + Affix string // includes prefix and suffix. First byte is prefix length. + Offset uint16 // Offset into Affix for prefix and suffix + NegOffset uint16 // Offset into Affix for negative prefix and suffix or 0. + PadRune rune + FormatWidth uint16 + + GroupingSize [2]uint8 + Flags PatternFlag +} + +// A RoundingContext indicates how a number should be converted to digits. +// It contains all information needed to determine the "visible digits" as +// required by the pluralization rules. +type RoundingContext struct { + // TODO: unify these two fields so that there is a more unambiguous meaning + // of how precision is handled. 
+ MaxSignificantDigits int16 // -1 is unlimited + MaxFractionDigits int16 // -1 is unlimited + + Increment uint32 + IncrementScale uint8 // May differ from printed scale. + + Mode RoundingMode + + DigitShift uint8 // Number of decimals to shift. Used for % and ‰. + + // Number of digits. + MinIntegerDigits uint8 + + MaxIntegerDigits uint8 + MinFractionDigits uint8 + MinSignificantDigits uint8 + + MinExponentDigits uint8 +} + +// RoundSignificantDigits returns the number of significant digits an +// implementation of Convert may round to or n < 0 if there is no maximum or +// a maximum is not recommended. +func (r *RoundingContext) RoundSignificantDigits() (n int) { + if r.MaxFractionDigits == 0 && r.MaxSignificantDigits > 0 { + return int(r.MaxSignificantDigits) + } else if r.isScientific() && r.MaxIntegerDigits == 1 { + if r.MaxSignificantDigits == 0 || + int(r.MaxFractionDigits+1) == int(r.MaxSignificantDigits) { + // Note: don't add DigitShift: it is only used for decimals. + return int(r.MaxFractionDigits) + 1 + } + } + return -1 +} + +// RoundFractionDigits returns the number of fraction digits an implementation +// of Convert may round to or n < 0 if there is no maximum or a maximum is not +// recommended. +func (r *RoundingContext) RoundFractionDigits() (n int) { + if r.MinExponentDigits == 0 && + r.MaxSignificantDigits == 0 && + r.MaxFractionDigits >= 0 { + return int(r.MaxFractionDigits) + int(r.DigitShift) + } + return -1 +} + +// SetScale fixes the RoundingContext to a fixed number of fraction digits. 
+func (r *RoundingContext) SetScale(scale int) { + r.MinFractionDigits = uint8(scale) + r.MaxFractionDigits = int16(scale) +} + +func (r *RoundingContext) SetPrecision(prec int) { + r.MaxSignificantDigits = int16(prec) +} + +func (r *RoundingContext) isScientific() bool { + return r.MinExponentDigits > 0 +} + +func (f *Pattern) needsSep(pos int) bool { + p := pos - 1 + size := int(f.GroupingSize[0]) + if size == 0 || p == 0 { + return false + } + if p == size { + return true + } + if p -= size; p < 0 { + return false + } + // TODO: make second groupingsize the same as first if 0 so that we can + // avoid this check. + if x := int(f.GroupingSize[1]); x != 0 { + size = x + } + return p%size == 0 +} + +// A PatternFlag is a bit mask for the flag field of a Pattern. +type PatternFlag uint8 + +const ( + AlwaysSign PatternFlag = 1 << iota + ElideSign // Use space instead of plus sign. AlwaysSign must be true. + AlwaysExpSign + AlwaysDecimalSeparator + ParenthesisForNegative // Common pattern. Saves space. + + PadAfterNumber + PadAfterAffix + + PadBeforePrefix = 0 // Default + PadAfterPrefix = PadAfterAffix + PadBeforeSuffix = PadAfterNumber + PadAfterSuffix = PadAfterNumber | PadAfterAffix + PadMask = PadAfterNumber | PadAfterAffix +) + +type parser struct { + *Pattern + + leadingSharps int + + pos int + err error + doNotTerminate bool + groupingCount uint + hasGroup bool + buf []byte +} + +func (p *parser) setError(err error) { + if p.err == nil { + p.err = err + } +} + +func (p *parser) updateGrouping() { + if p.hasGroup && + 0 < p.groupingCount && p.groupingCount < 255 { + p.GroupingSize[1] = p.GroupingSize[0] + p.GroupingSize[0] = uint8(p.groupingCount) + } + p.groupingCount = 0 + p.hasGroup = true +} + +var ( + // TODO: more sensible and localizeable error messages. 
+ errMultiplePadSpecifiers = errors.New("format: pattern has multiple pad specifiers") + errInvalidPadSpecifier = errors.New("format: invalid pad specifier") + errInvalidQuote = errors.New("format: invalid quote") + errAffixTooLarge = errors.New("format: prefix or suffix exceeds maximum UTF-8 length of 256 bytes") + errDuplicatePercentSign = errors.New("format: duplicate percent sign") + errDuplicatePermilleSign = errors.New("format: duplicate permille sign") + errUnexpectedEnd = errors.New("format: unexpected end of pattern") +) + +// ParsePattern extracts formatting information from a CLDR number pattern. +// +// See https://unicode.org/reports/tr35/tr35-numbers.html#Number_Format_Patterns. +func ParsePattern(s string) (f *Pattern, err error) { + p := parser{Pattern: &Pattern{}} + + s = p.parseSubPattern(s) + + if s != "" { + // Parse negative sub pattern. + if s[0] != ';' { + p.setError(errors.New("format: error parsing first sub pattern")) + return nil, p.err + } + neg := parser{Pattern: &Pattern{}} // just for extracting the affixes. + s = neg.parseSubPattern(s[len(";"):]) + p.NegOffset = uint16(len(p.buf)) + p.buf = append(p.buf, neg.buf...) + } + if s != "" { + p.setError(errors.New("format: spurious characters at end of pattern")) + } + if p.err != nil { + return nil, p.err + } + if affix := string(p.buf); affix == "\x00\x00" || affix == "\x00\x00\x00\x00" { + // No prefix or suffixes. 
+ p.NegOffset = 0 + } else { + p.Affix = affix + } + if p.Increment == 0 { + p.IncrementScale = 0 + } + return p.Pattern, nil +} + +func (p *parser) parseSubPattern(s string) string { + s = p.parsePad(s, PadBeforePrefix) + s = p.parseAffix(s) + s = p.parsePad(s, PadAfterPrefix) + + s = p.parse(p.number, s) + p.updateGrouping() + + s = p.parsePad(s, PadBeforeSuffix) + s = p.parseAffix(s) + s = p.parsePad(s, PadAfterSuffix) + return s +} + +func (p *parser) parsePad(s string, f PatternFlag) (tail string) { + if len(s) >= 2 && s[0] == '*' { + r, sz := utf8.DecodeRuneInString(s[1:]) + if p.PadRune != 0 { + p.err = errMultiplePadSpecifiers + } else { + p.Flags |= f + p.PadRune = r + } + return s[1+sz:] + } + return s +} + +func (p *parser) parseAffix(s string) string { + x := len(p.buf) + p.buf = append(p.buf, 0) // placeholder for affix length + + s = p.parse(p.affix, s) + + n := len(p.buf) - x - 1 + if n > 0xFF { + p.setError(errAffixTooLarge) + } + p.buf[x] = uint8(n) + return s +} + +// state implements a state transition. It returns the new state. A state +// function may set an error on the parser or may simply return on an incorrect +// token and let the next phase fail. +type state func(r rune) state + +// parse repeatedly applies a state function on the given string until a +// termination condition is reached. 
+func (p *parser) parse(fn state, s string) (tail string) { + for i, r := range s { + p.doNotTerminate = false + if fn = fn(r); fn == nil || p.err != nil { + return s[i:] + } + p.FormatWidth++ + } + if p.doNotTerminate { + p.setError(errUnexpectedEnd) + } + return "" +} + +func (p *parser) affix(r rune) state { + switch r { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', + '#', '@', '.', '*', ',', ';': + return nil + case '\'': + p.FormatWidth-- + return p.escapeFirst + case '%': + if p.DigitShift != 0 { + p.setError(errDuplicatePercentSign) + } + p.DigitShift = 2 + case '\u2030': // ‰ Per mille + if p.DigitShift != 0 { + p.setError(errDuplicatePermilleSign) + } + p.DigitShift = 3 + // TODO: handle currency somehow: ¤, ¤¤, ¤¤¤, ¤¤¤¤ + } + p.buf = append(p.buf, string(r)...) + return p.affix +} + +func (p *parser) escapeFirst(r rune) state { + switch r { + case '\'': + p.buf = append(p.buf, "\\'"...) + return p.affix + default: + p.buf = append(p.buf, '\'') + p.buf = append(p.buf, string(r)...) + } + return p.escape +} + +func (p *parser) escape(r rune) state { + switch r { + case '\'': + p.FormatWidth-- + p.buf = append(p.buf, '\'') + return p.affix + default: + p.buf = append(p.buf, string(r)...) + } + return p.escape +} + +// number parses a number. The BNF says the integer part should always have +// a '0', but that does not appear to be the case according to the rest of the +// documentation. We will allow having only '#' numbers. +func (p *parser) number(r rune) state { + switch r { + case '#': + p.groupingCount++ + p.leadingSharps++ + case '@': + p.groupingCount++ + p.leadingSharps = 0 + p.MaxFractionDigits = -1 + return p.sigDigits(r) + case ',': + if p.leadingSharps == 0 { // no leading commas + return nil + } + p.updateGrouping() + case 'E': + p.MaxIntegerDigits = uint8(p.leadingSharps) + return p.exponent + case '.': // allow ".##" etc. 
+ p.updateGrouping() + return p.fraction + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + return p.integer(r) + default: + return nil + } + return p.number +} + +func (p *parser) integer(r rune) state { + if !('0' <= r && r <= '9') { + var next state + switch r { + case 'E': + if p.leadingSharps > 0 { + p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits + } + next = p.exponent + case '.': + next = p.fraction + case ',': + next = p.integer + } + p.updateGrouping() + return next + } + p.Increment = p.Increment*10 + uint32(r-'0') + p.groupingCount++ + p.MinIntegerDigits++ + return p.integer +} + +func (p *parser) sigDigits(r rune) state { + switch r { + case '@': + p.groupingCount++ + p.MaxSignificantDigits++ + p.MinSignificantDigits++ + case '#': + return p.sigDigitsFinal(r) + case 'E': + p.updateGrouping() + return p.normalizeSigDigitsWithExponent() + default: + p.updateGrouping() + return nil + } + return p.sigDigits +} + +func (p *parser) sigDigitsFinal(r rune) state { + switch r { + case '#': + p.groupingCount++ + p.MaxSignificantDigits++ + case 'E': + p.updateGrouping() + return p.normalizeSigDigitsWithExponent() + default: + p.updateGrouping() + return nil + } + return p.sigDigitsFinal +} + +func (p *parser) normalizeSigDigitsWithExponent() state { + p.MinIntegerDigits, p.MaxIntegerDigits = 1, 1 + p.MinFractionDigits = p.MinSignificantDigits - 1 + p.MaxFractionDigits = p.MaxSignificantDigits - 1 + p.MinSignificantDigits, p.MaxSignificantDigits = 0, 0 + return p.exponent +} + +func (p *parser) fraction(r rune) state { + switch r { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + p.Increment = p.Increment*10 + uint32(r-'0') + p.IncrementScale++ + p.MinFractionDigits++ + p.MaxFractionDigits++ + case '#': + p.MaxFractionDigits++ + case 'E': + if p.leadingSharps > 0 { + p.MaxIntegerDigits = uint8(p.leadingSharps) + p.MinIntegerDigits + } + return p.exponent + default: + return nil + } + return p.fraction +} + +func (p *parser) 
exponent(r rune) state { + switch r { + case '+': + // Set mode and check it wasn't already set. + if p.Flags&AlwaysExpSign != 0 || p.MinExponentDigits > 0 { + break + } + p.Flags |= AlwaysExpSign + p.doNotTerminate = true + return p.exponent + case '0': + p.MinExponentDigits++ + return p.exponent + } + // termination condition + if p.MinExponentDigits == 0 { + p.setError(errors.New("format: need at least one digit")) + } + return nil +} diff --git a/vendor/golang.org/x/text/internal/number/roundingmode_string.go b/vendor/golang.org/x/text/internal/number/roundingmode_string.go new file mode 100644 index 00000000..bcc22471 --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/roundingmode_string.go @@ -0,0 +1,30 @@ +// Code generated by "stringer -type RoundingMode"; DO NOT EDIT. + +package number + +import "strconv" + +func _() { + // An "invalid array index" compiler error signifies that the constant values have changed. + // Re-run the stringer command to generate them again. + var x [1]struct{} + _ = x[ToNearestEven-0] + _ = x[ToNearestZero-1] + _ = x[ToNearestAway-2] + _ = x[ToPositiveInf-3] + _ = x[ToNegativeInf-4] + _ = x[ToZero-5] + _ = x[AwayFromZero-6] + _ = x[numModes-7] +} + +const _RoundingMode_name = "ToNearestEvenToNearestZeroToNearestAwayToPositiveInfToNegativeInfToZeroAwayFromZeronumModes" + +var _RoundingMode_index = [...]uint8{0, 13, 26, 39, 52, 65, 71, 83, 91} + +func (i RoundingMode) String() string { + if i >= RoundingMode(len(_RoundingMode_index)-1) { + return "RoundingMode(" + strconv.FormatInt(int64(i), 10) + ")" + } + return _RoundingMode_name[_RoundingMode_index[i]:_RoundingMode_index[i+1]] +} diff --git a/vendor/golang.org/x/text/internal/number/tables.go b/vendor/golang.org/x/text/internal/number/tables.go new file mode 100644 index 00000000..8efce81b --- /dev/null +++ b/vendor/golang.org/x/text/internal/number/tables.go @@ -0,0 +1,1219 @@ +// Code generated by running "go generate" in golang.org/x/text. DO NOT EDIT. 
+ +package number + +import "golang.org/x/text/internal/stringset" + +// CLDRVersion is the CLDR version from which the tables in this package are derived. +const CLDRVersion = "32" + +var numSysData = []systemData{ // 59 elements + 0: {id: 0x0, digitSize: 0x1, zero: [4]uint8{0x30, 0x0, 0x0, 0x0}}, + 1: {id: 0x1, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9e, 0xa5, 0x90}}, + 2: {id: 0x2, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9c, 0xb0}}, + 3: {id: 0x3, digitSize: 0x2, zero: [4]uint8{0xd9, 0xa0, 0x0, 0x0}}, + 4: {id: 0x4, digitSize: 0x2, zero: [4]uint8{0xdb, 0xb0, 0x0, 0x0}}, + 5: {id: 0x5, digitSize: 0x3, zero: [4]uint8{0xe1, 0xad, 0x90, 0x0}}, + 6: {id: 0x6, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa7, 0xa6, 0x0}}, + 7: {id: 0x7, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb1, 0x90}}, + 8: {id: 0x8, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x81, 0xa6}}, + 9: {id: 0x9, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x84, 0xb6}}, + 10: {id: 0xa, digitSize: 0x3, zero: [4]uint8{0xea, 0xa9, 0x90, 0x0}}, + 11: {id: 0xb, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa5, 0xa6, 0x0}}, + 12: {id: 0xc, digitSize: 0x3, zero: [4]uint8{0xef, 0xbc, 0x90, 0x0}}, + 13: {id: 0xd, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xb5, 0x90}}, + 14: {id: 0xe, digitSize: 0x3, zero: [4]uint8{0xe0, 0xab, 0xa6, 0x0}}, + 15: {id: 0xf, digitSize: 0x3, zero: [4]uint8{0xe0, 0xa9, 0xa6, 0x0}}, + 16: {id: 0x10, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xad, 0x90}}, + 17: {id: 0x11, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0x90, 0x0}}, + 18: {id: 0x12, digitSize: 0x3, zero: [4]uint8{0xea, 0xa4, 0x80, 0x0}}, + 19: {id: 0x13, digitSize: 0x3, zero: [4]uint8{0xe1, 0x9f, 0xa0, 0x0}}, + 20: {id: 0x14, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb3, 0xa6, 0x0}}, + 21: {id: 0x15, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x80, 0x0}}, + 22: {id: 0x16, digitSize: 0x3, zero: [4]uint8{0xe1, 0xaa, 0x90, 0x0}}, + 23: {id: 0x17, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbb, 0x90, 0x0}}, + 24: {id: 0x18, digitSize: 0x3, zero: 
[4]uint8{0xe1, 0xb1, 0x80, 0x0}}, + 25: {id: 0x19, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa5, 0x86, 0x0}}, + 26: {id: 0x1a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x8e}}, + 27: {id: 0x1b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0x98}}, + 28: {id: 0x1c, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xb6}}, + 29: {id: 0x1d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xac}}, + 30: {id: 0x1e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x9d, 0x9f, 0xa2}}, + 31: {id: 0x1f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb5, 0xa6, 0x0}}, + 32: {id: 0x20, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x99, 0x90}}, + 33: {id: 0x21, digitSize: 0x3, zero: [4]uint8{0xe1, 0xa0, 0x90, 0x0}}, + 34: {id: 0x22, digitSize: 0x4, zero: [4]uint8{0xf0, 0x96, 0xa9, 0xa0}}, + 35: {id: 0x23, digitSize: 0x3, zero: [4]uint8{0xea, 0xaf, 0xb0, 0x0}}, + 36: {id: 0x24, digitSize: 0x3, zero: [4]uint8{0xe1, 0x81, 0x80, 0x0}}, + 37: {id: 0x25, digitSize: 0x3, zero: [4]uint8{0xe1, 0x82, 0x90, 0x0}}, + 38: {id: 0x26, digitSize: 0x3, zero: [4]uint8{0xea, 0xa7, 0xb0, 0x0}}, + 39: {id: 0x27, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x91, 0x90}}, + 40: {id: 0x28, digitSize: 0x2, zero: [4]uint8{0xdf, 0x80, 0x0, 0x0}}, + 41: {id: 0x29, digitSize: 0x3, zero: [4]uint8{0xe1, 0xb1, 0x90, 0x0}}, + 42: {id: 0x2a, digitSize: 0x3, zero: [4]uint8{0xe0, 0xad, 0xa6, 0x0}}, + 43: {id: 0x2b, digitSize: 0x4, zero: [4]uint8{0xf0, 0x90, 0x92, 0xa0}}, + 44: {id: 0x2c, digitSize: 0x3, zero: [4]uint8{0xea, 0xa3, 0x90, 0x0}}, + 45: {id: 0x2d, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x87, 0x90}}, + 46: {id: 0x2e, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x8b, 0xb0}}, + 47: {id: 0x2f, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb7, 0xa6, 0x0}}, + 48: {id: 0x30, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x83, 0xb0}}, + 49: {id: 0x31, digitSize: 0x3, zero: [4]uint8{0xe1, 0xae, 0xb0, 0x0}}, + 50: {id: 0x32, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x9b, 0x80}}, + 51: {id: 0x33, digitSize: 0x3, zero: [4]uint8{0xe1, 
0xa7, 0x90, 0x0}}, + 52: {id: 0x34, digitSize: 0x3, zero: [4]uint8{0xe0, 0xaf, 0xa6, 0x0}}, + 53: {id: 0x35, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb1, 0xa6, 0x0}}, + 54: {id: 0x36, digitSize: 0x3, zero: [4]uint8{0xe0, 0xb9, 0x90, 0x0}}, + 55: {id: 0x37, digitSize: 0x3, zero: [4]uint8{0xe0, 0xbc, 0xa0, 0x0}}, + 56: {id: 0x38, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0x93, 0x90}}, + 57: {id: 0x39, digitSize: 0x3, zero: [4]uint8{0xea, 0x98, 0xa0, 0x0}}, + 58: {id: 0x3a, digitSize: 0x4, zero: [4]uint8{0xf0, 0x91, 0xa3, 0xa0}}, +} // Size: 378 bytes + +const ( + numAdlm = 0x1 + numAhom = 0x2 + numArab = 0x3 + numArabext = 0x4 + numArmn = 0x3b + numArmnlow = 0x3c + numBali = 0x5 + numBeng = 0x6 + numBhks = 0x7 + numBrah = 0x8 + numCakm = 0x9 + numCham = 0xa + numCyrl = 0x3d + numDeva = 0xb + numEthi = 0x3e + numFullwide = 0xc + numGeor = 0x3f + numGonm = 0xd + numGrek = 0x40 + numGreklow = 0x41 + numGujr = 0xe + numGuru = 0xf + numHanidays = 0x42 + numHanidec = 0x43 + numHans = 0x44 + numHansfin = 0x45 + numHant = 0x46 + numHantfin = 0x47 + numHebr = 0x48 + numHmng = 0x10 + numJava = 0x11 + numJpan = 0x49 + numJpanfin = 0x4a + numKali = 0x12 + numKhmr = 0x13 + numKnda = 0x14 + numLana = 0x15 + numLanatham = 0x16 + numLaoo = 0x17 + numLatn = 0x0 + numLepc = 0x18 + numLimb = 0x19 + numMathbold = 0x1a + numMathdbl = 0x1b + numMathmono = 0x1c + numMathsanb = 0x1d + numMathsans = 0x1e + numMlym = 0x1f + numModi = 0x20 + numMong = 0x21 + numMroo = 0x22 + numMtei = 0x23 + numMymr = 0x24 + numMymrshan = 0x25 + numMymrtlng = 0x26 + numNewa = 0x27 + numNkoo = 0x28 + numOlck = 0x29 + numOrya = 0x2a + numOsma = 0x2b + numRoman = 0x4b + numRomanlow = 0x4c + numSaur = 0x2c + numShrd = 0x2d + numSind = 0x2e + numSinh = 0x2f + numSora = 0x30 + numSund = 0x31 + numTakr = 0x32 + numTalu = 0x33 + numTaml = 0x4d + numTamldec = 0x34 + numTelu = 0x35 + numThai = 0x36 + numTibt = 0x37 + numTirh = 0x38 + numVaii = 0x39 + numWara = 0x3a + numNumberSystems +) + +var systemMap = 
map[string]system{ + "adlm": numAdlm, + "ahom": numAhom, + "arab": numArab, + "arabext": numArabext, + "armn": numArmn, + "armnlow": numArmnlow, + "bali": numBali, + "beng": numBeng, + "bhks": numBhks, + "brah": numBrah, + "cakm": numCakm, + "cham": numCham, + "cyrl": numCyrl, + "deva": numDeva, + "ethi": numEthi, + "fullwide": numFullwide, + "geor": numGeor, + "gonm": numGonm, + "grek": numGrek, + "greklow": numGreklow, + "gujr": numGujr, + "guru": numGuru, + "hanidays": numHanidays, + "hanidec": numHanidec, + "hans": numHans, + "hansfin": numHansfin, + "hant": numHant, + "hantfin": numHantfin, + "hebr": numHebr, + "hmng": numHmng, + "java": numJava, + "jpan": numJpan, + "jpanfin": numJpanfin, + "kali": numKali, + "khmr": numKhmr, + "knda": numKnda, + "lana": numLana, + "lanatham": numLanatham, + "laoo": numLaoo, + "latn": numLatn, + "lepc": numLepc, + "limb": numLimb, + "mathbold": numMathbold, + "mathdbl": numMathdbl, + "mathmono": numMathmono, + "mathsanb": numMathsanb, + "mathsans": numMathsans, + "mlym": numMlym, + "modi": numModi, + "mong": numMong, + "mroo": numMroo, + "mtei": numMtei, + "mymr": numMymr, + "mymrshan": numMymrshan, + "mymrtlng": numMymrtlng, + "newa": numNewa, + "nkoo": numNkoo, + "olck": numOlck, + "orya": numOrya, + "osma": numOsma, + "roman": numRoman, + "romanlow": numRomanlow, + "saur": numSaur, + "shrd": numShrd, + "sind": numSind, + "sinh": numSinh, + "sora": numSora, + "sund": numSund, + "takr": numTakr, + "talu": numTalu, + "taml": numTaml, + "tamldec": numTamldec, + "telu": numTelu, + "thai": numThai, + "tibt": numTibt, + "tirh": numTirh, + "vaii": numVaii, + "wara": numWara, +} + +var symIndex = [][12]uint8{ // 81 elements + 0: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 1: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 2: [12]uint8{0x0, 0x1, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 3: [12]uint8{0x1, 0x0, 0x2, 0xd, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 4: 
[12]uint8{0x0, 0x1, 0x2, 0x11, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0x10, 0xb}, + 5: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x12, 0xb}, + 6: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 7: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x13, 0xb}, + 8: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 9: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 10: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 11: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 12: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x14, 0x8, 0x9, 0xa, 0xb}, + 13: [12]uint8{0x0, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 14: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x16, 0xb}, + 15: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 16: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0x0}, + 17: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 18: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 19: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb}, + 20: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x19, 0x1a, 0xa, 0xb}, + 21: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 22: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0xa, 0xb}, + 23: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 24: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0x1d, 0xb}, + 25: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0x1e, 0x0}, + 26: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x1b, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 27: [12]uint8{0x0, 0x1, 0x2, 0x3, 0xe, 0xf, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 28: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x1f, 0xb}, + 29: [12]uint8{0x0, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 
0x8, 0x9, 0xa, 0xb}, + 30: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x20, 0xb}, + 31: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x21, 0x7, 0x8, 0x9, 0x22, 0xb}, + 32: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x23, 0xb}, + 33: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x14, 0x8, 0x9, 0x24, 0xb}, + 34: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x18, 0x7, 0x8, 0x9, 0x24, 0xb}, + 35: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x25, 0xb}, + 36: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x26, 0xb}, + 37: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x27, 0xb}, + 38: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x28, 0xb}, + 39: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x29, 0xb}, + 40: [12]uint8{0x1, 0x0, 0x2, 0x3, 0xe, 0x1c, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 41: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2a, 0xb}, + 42: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2b, 0xb}, + 43: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x1b, 0x2c, 0x14, 0x8, 0x9, 0x24, 0xb}, + 44: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x0}, + 45: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 46: [12]uint8{0x1, 0x0, 0x2, 0x3, 0x4, 0x1b, 0x17, 0x7, 0x8, 0x9, 0xa, 0xb}, + 47: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2d, 0x0}, + 48: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2e, 0xb}, + 49: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x2f, 0xb}, + 50: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x30, 0x7, 0x8, 0x9, 0xa, 0xb}, + 51: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x31, 0xb}, + 52: [12]uint8{0x1, 0xc, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x32, 0xb}, + 53: [12]uint8{0x1, 0x15, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0xb}, + 54: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x33, 0xb}, + 55: 
[12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x34, 0xb}, + 56: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 57: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0x3d, 0xb}, + 58: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 59: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x3a, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 60: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x39, 0x40, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 61: [12]uint8{0x35, 0x36, 0x37, 0x41, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 62: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x3e, 0x3f, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0xb}, + 63: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x3b, 0x7, 0x3c, 0x9, 0xa, 0x0}, + 64: [12]uint8{0x35, 0xc, 0x37, 0x38, 0x39, 0x42, 0x43, 0x7, 0x44, 0x9, 0x24, 0xb}, + 65: [12]uint8{0x35, 0x36, 0x37, 0x38, 0x39, 0x5, 0x3b, 0x7, 0x3c, 0x9, 0x33, 0xb}, + 66: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x46, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 67: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0x1d, 0xb}, + 68: [12]uint8{0x35, 0x36, 0x37, 0x11, 0xe, 0x1c, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 69: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x45, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 70: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x0}, + 71: [12]uint8{0x35, 0x1, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0xa, 0x35}, + 72: [12]uint8{0x1, 0xc, 0x37, 0x11, 0x45, 0x47, 0x43, 0x7, 0x3c, 0x9, 0x24, 0xb}, + 73: [12]uint8{0x35, 0x36, 0x2, 0x3, 0x45, 0x46, 0x43, 0x7, 0x8, 0x9, 0xa, 0x35}, + 74: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x31, 0x35}, + 75: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x4, 0x5, 0x43, 0x7, 0x3c, 0x9, 0x32, 0x35}, + 76: [12]uint8{0x35, 0x36, 0x37, 0x11, 0x48, 0x46, 0x43, 0x7, 0x3c, 0x9, 0x33, 0x35}, + 77: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0xa, 0x49}, + 78: [12]uint8{0x0, 0x1, 0x4a, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 
0x28, 0xb}, + 79: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x9, 0x4b, 0xb}, + 80: [12]uint8{0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7, 0x8, 0x4c, 0x4d, 0xb}, +} // Size: 996 bytes + +var symData = stringset.Set{ + Data: "" + // Size: 599 bytes + ".,;%+-E׉∞NaN:\u00a0\u200e%\u200e\u200e+\u200e-ليس\u00a0رقمًا٪NDТерхьаш" + + "\u00a0дац·’mnne×10^0/00INF−\u200e−ناعددepälukuՈչԹარ\u00a0არის\u00a0რიცხვ" + + "იZMdMсан\u00a0емес¤¤¤сан\u00a0эмесບໍ່\u200bແມ່ນ\u200bໂຕ\u200bເລກNSဂဏန်" + + "းမဟုတ်သောННне\u00a0числочыыһыла\u00a0буотах·10^epilohosan\u00a0dälTFЕs" + + "on\u00a0emasҳақиқий\u00a0сон\u00a0эмас非數值非数值٫٬؛٪\u061c\u061c+\u061c-اس؉ل" + + "يس\u00a0رقم\u200f+\u200f-\u200f−٪\u200f\u061c−×۱۰^؉\u200f\u200e+\u200e" + + "\u200e-\u200e\u200e−\u200e+\u200e:၊ཨང་མེན་གྲངས་མེདཨང་མད", + Index: []uint16{ // 79 elements + // Entry 0 - 3F + 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, + 0x0009, 0x000c, 0x000f, 0x0012, 0x0013, 0x0015, 0x001c, 0x0020, + 0x0024, 0x0036, 0x0038, 0x003a, 0x0050, 0x0052, 0x0055, 0x0058, + 0x0059, 0x005e, 0x0062, 0x0065, 0x0068, 0x006e, 0x0078, 0x0080, + 0x0086, 0x00ae, 0x00af, 0x00b2, 0x00c2, 0x00c8, 0x00d8, 0x0105, + 0x0107, 0x012e, 0x0132, 0x0142, 0x015e, 0x0163, 0x016a, 0x0173, + 0x0175, 0x0177, 0x0180, 0x01a0, 0x01a9, 0x01b2, 0x01b4, 0x01b6, + 0x01b8, 0x01bc, 0x01bf, 0x01c2, 0x01c6, 0x01c8, 0x01d6, 0x01da, + // Entry 40 - 7F + 0x01de, 0x01e4, 0x01e9, 0x01ee, 0x01f5, 0x01fa, 0x0201, 0x0208, + 0x0211, 0x0215, 0x0218, 0x021b, 0x0230, 0x0248, 0x0257, + }, +} // Size: 797 bytes + +// langToDefaults maps a compact language index to the default numbering system +// and default symbol set +var langToDefaults = [775]symOffset{ + // Entry 0 - 3F + 0x8000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, 0x0000, + 0x0000, 0x0000, 0x8003, 0x0002, 0x0002, 0x0002, 0x0002, 0x0003, + 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, 0x0002, + 0x0003, 0x0003, 0x0003, 0x0003, 0x0002, 0x0002, 0x0002, 0x0004, + 0x0002, 0x0004, 0x0002, 0x0002, 
0x0002, 0x0003, 0x0002, 0x0000, + 0x8005, 0x0000, 0x0000, 0x0000, 0x8006, 0x0005, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, + // Entry 40 - 7F + 0x8009, 0x0000, 0x0000, 0x800a, 0x0000, 0x0000, 0x800c, 0x0001, + 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0006, 0x800e, 0x0000, 0x0000, 0x0007, + 0x0007, 0x0000, 0x0000, 0x0000, 0x0000, 0x800f, 0x0008, 0x0008, + 0x8011, 0x0001, 0x0001, 0x0001, 0x803c, 0x0000, 0x0009, 0x0009, + 0x0009, 0x0000, 0x0000, 0x000a, 0x000b, 0x000a, 0x000c, 0x000a, + 0x000a, 0x000c, 0x000a, 0x000d, 0x000d, 0x000a, 0x000a, 0x0001, + 0x0001, 0x0000, 0x0001, 0x0001, 0x803f, 0x0000, 0x0000, 0x0000, + // Entry 80 - BF + 0x000e, 0x000e, 0x000e, 0x000f, 0x000f, 0x000f, 0x0000, 0x0000, + 0x0006, 0x0000, 0x0000, 0x0000, 0x000a, 0x0010, 0x0000, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0011, 0x0000, 0x000a, + 0x0000, 0x0000, 0x0000, 0x0000, 0x000a, 0x0000, 0x0009, 0x0000, + 0x0000, 0x0012, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + // Entry C0 - FF + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0013, 0x0000, + 0x0000, 0x000f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, 0x0000, 0x0000, 0x0015, + 0x0015, 0x0006, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, + 0x0006, 0x0001, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, + // Entry 100 - 13F + 0x0000, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, + 0x0000, 0x0006, 0x0000, 0x0000, 0x0006, 
0x0006, 0x0016, 0x0016, + 0x0017, 0x0017, 0x0001, 0x0001, 0x8041, 0x0018, 0x0018, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0019, 0x0019, 0x0000, 0x0000, + 0x0017, 0x0017, 0x0017, 0x8044, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0006, 0x0006, 0x0001, 0x0001, 0x0001, 0x0001, + // Entry 140 - 17F + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x0001, 0x0001, 0x0006, 0x0006, 0x0006, 0x0006, 0x0000, 0x0000, + 0x8047, 0x0000, 0x0006, 0x0006, 0x001a, 0x001a, 0x001a, 0x001a, + 0x804a, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x804c, 0x001b, 0x0000, + 0x0000, 0x0006, 0x0006, 0x0006, 0x000a, 0x000a, 0x0001, 0x0001, + 0x001c, 0x001c, 0x0009, 0x0009, 0x804f, 0x0000, 0x0000, 0x0000, + // Entry 180 - 1BF + 0x0000, 0x0000, 0x8052, 0x0006, 0x0006, 0x001d, 0x0006, 0x0006, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x001e, 0x001e, 0x001f, + 0x001f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0001, + 0x0001, 0x000d, 0x000d, 0x0000, 0x0000, 0x0020, 0x0020, 0x0006, + 0x0006, 0x0021, 0x0021, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x8054, 0x0000, 0x0000, 0x0000, 0x0000, 0x8056, 0x001b, + 0x0000, 0x0000, 0x0001, 0x0001, 0x0022, 0x0022, 0x0000, 0x0000, + // Entry 1C0 - 1FF + 0x0000, 0x0023, 0x0023, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0024, 0x0024, 0x8058, 0x0000, 0x0000, 0x0016, 0x0016, 0x0006, + 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0025, 0x0025, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x000d, 0x000d, 0x0000, 0x0000, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0006, 0x0006, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x805a, 0x0000, 0x0000, 0x0006, 
0x0000, + 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 0x805b, 0x0026, 0x805d, + // Entry 200 - 23F + 0x0000, 0x0000, 0x0000, 0x0000, 0x805e, 0x0015, 0x0015, 0x0000, + 0x0000, 0x0006, 0x0006, 0x0006, 0x8061, 0x0000, 0x0000, 0x8062, + 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0001, + 0x0001, 0x0015, 0x0015, 0x0006, 0x0006, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0027, 0x0027, 0x0027, 0x8065, 0x8067, + 0x001b, 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, + 0x8069, 0x0028, 0x0006, 0x0001, 0x0006, 0x0001, 0x0001, 0x0001, + // Entry 240 - 27F + 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, 0x0000, + 0x0006, 0x0000, 0x0000, 0x001a, 0x001a, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0029, 0x0029, 0x0029, 0x0029, + 0x0029, 0x0029, 0x0029, 0x0006, 0x0006, 0x0000, 0x0000, 0x002a, + 0x002a, 0x0000, 0x0000, 0x0000, 0x0000, 0x806b, 0x0000, 0x0000, + 0x002b, 0x002b, 0x002b, 0x002b, 0x0006, 0x0006, 0x000d, 0x000d, + 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0001, + 0x002c, 0x002c, 0x002d, 0x002d, 0x002e, 0x002e, 0x0000, 0x0000, + // Entry 280 - 2BF + 0x0000, 0x002f, 0x002f, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0001, 0x0001, 0x0006, + 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, 0x0006, + 0x0006, 0x0006, 0x0000, 0x0000, 0x0000, 0x806d, 0x0022, 0x0022, + 0x0022, 0x0000, 0x0006, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0030, 0x0030, 0x0000, 0x0000, 0x8071, 0x0031, 0x0006, + // Entry 2C0 - 2FF + 0x0006, 0x0006, 0x0000, 0x0001, 0x0001, 0x000d, 0x000d, 0x0001, + 0x0001, 0x0000, 0x0000, 0x0032, 0x0032, 0x8074, 0x8076, 0x001b, + 0x8077, 0x8079, 0x0028, 0x807b, 0x0034, 0x0033, 0x0033, 0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x0006, 0x0006, 
0x0000, + 0x0000, 0x0000, 0x0000, 0x0000, 0x0035, 0x0035, 0x0006, 0x0006, + 0x0000, 0x0000, 0x0000, 0x0001, 0x0001, 0x0000, 0x0000, 0x0000, + 0x0000, 0x0000, 0x0036, 0x0037, 0x0037, 0x0036, 0x0036, 0x0001, + 0x0001, 0x807d, 0x0000, 0x0000, 0x0000, 0x0000, 0x0000, 0x8080, + // Entry 300 - 33F + 0x0036, 0x0036, 0x0036, 0x0000, 0x0000, 0x0006, 0x0014, +} // Size: 1550 bytes + +// langToAlt is a list of numbering system and symbol set pairs, sorted and +// marked by compact language index. +var langToAlt = []altSymData{ // 131 elements + 1: {compactTag: 0x0, symIndex: 0x38, system: 0x3}, + 2: {compactTag: 0x0, symIndex: 0x42, system: 0x4}, + 3: {compactTag: 0xa, symIndex: 0x39, system: 0x3}, + 4: {compactTag: 0xa, symIndex: 0x2, system: 0x0}, + 5: {compactTag: 0x28, symIndex: 0x0, system: 0x6}, + 6: {compactTag: 0x2c, symIndex: 0x5, system: 0x0}, + 7: {compactTag: 0x2c, symIndex: 0x3a, system: 0x3}, + 8: {compactTag: 0x2c, symIndex: 0x42, system: 0x4}, + 9: {compactTag: 0x40, symIndex: 0x0, system: 0x6}, + 10: {compactTag: 0x43, symIndex: 0x0, system: 0x0}, + 11: {compactTag: 0x43, symIndex: 0x4f, system: 0x37}, + 12: {compactTag: 0x46, symIndex: 0x1, system: 0x0}, + 13: {compactTag: 0x46, symIndex: 0x38, system: 0x3}, + 14: {compactTag: 0x54, symIndex: 0x0, system: 0x9}, + 15: {compactTag: 0x5d, symIndex: 0x3a, system: 0x3}, + 16: {compactTag: 0x5d, symIndex: 0x8, system: 0x0}, + 17: {compactTag: 0x60, symIndex: 0x1, system: 0x0}, + 18: {compactTag: 0x60, symIndex: 0x38, system: 0x3}, + 19: {compactTag: 0x60, symIndex: 0x42, system: 0x4}, + 20: {compactTag: 0x60, symIndex: 0x0, system: 0x5}, + 21: {compactTag: 0x60, symIndex: 0x0, system: 0x6}, + 22: {compactTag: 0x60, symIndex: 0x0, system: 0x8}, + 23: {compactTag: 0x60, symIndex: 0x0, system: 0x9}, + 24: {compactTag: 0x60, symIndex: 0x0, system: 0xa}, + 25: {compactTag: 0x60, symIndex: 0x0, system: 0xb}, + 26: {compactTag: 0x60, symIndex: 0x0, system: 0xc}, + 27: {compactTag: 0x60, symIndex: 0x0, system: 0xd}, + 28: 
{compactTag: 0x60, symIndex: 0x0, system: 0xe}, + 29: {compactTag: 0x60, symIndex: 0x0, system: 0xf}, + 30: {compactTag: 0x60, symIndex: 0x0, system: 0x11}, + 31: {compactTag: 0x60, symIndex: 0x0, system: 0x12}, + 32: {compactTag: 0x60, symIndex: 0x0, system: 0x13}, + 33: {compactTag: 0x60, symIndex: 0x0, system: 0x14}, + 34: {compactTag: 0x60, symIndex: 0x0, system: 0x15}, + 35: {compactTag: 0x60, symIndex: 0x0, system: 0x16}, + 36: {compactTag: 0x60, symIndex: 0x0, system: 0x17}, + 37: {compactTag: 0x60, symIndex: 0x0, system: 0x18}, + 38: {compactTag: 0x60, symIndex: 0x0, system: 0x19}, + 39: {compactTag: 0x60, symIndex: 0x0, system: 0x1f}, + 40: {compactTag: 0x60, symIndex: 0x0, system: 0x21}, + 41: {compactTag: 0x60, symIndex: 0x0, system: 0x23}, + 42: {compactTag: 0x60, symIndex: 0x0, system: 0x24}, + 43: {compactTag: 0x60, symIndex: 0x0, system: 0x25}, + 44: {compactTag: 0x60, symIndex: 0x0, system: 0x28}, + 45: {compactTag: 0x60, symIndex: 0x0, system: 0x29}, + 46: {compactTag: 0x60, symIndex: 0x0, system: 0x2a}, + 47: {compactTag: 0x60, symIndex: 0x0, system: 0x2b}, + 48: {compactTag: 0x60, symIndex: 0x0, system: 0x2c}, + 49: {compactTag: 0x60, symIndex: 0x0, system: 0x2d}, + 50: {compactTag: 0x60, symIndex: 0x0, system: 0x30}, + 51: {compactTag: 0x60, symIndex: 0x0, system: 0x31}, + 52: {compactTag: 0x60, symIndex: 0x0, system: 0x32}, + 53: {compactTag: 0x60, symIndex: 0x0, system: 0x33}, + 54: {compactTag: 0x60, symIndex: 0x0, system: 0x34}, + 55: {compactTag: 0x60, symIndex: 0x0, system: 0x35}, + 56: {compactTag: 0x60, symIndex: 0x0, system: 0x36}, + 57: {compactTag: 0x60, symIndex: 0x0, system: 0x37}, + 58: {compactTag: 0x60, symIndex: 0x0, system: 0x39}, + 59: {compactTag: 0x60, symIndex: 0x0, system: 0x43}, + 60: {compactTag: 0x64, symIndex: 0x0, system: 0x0}, + 61: {compactTag: 0x64, symIndex: 0x38, system: 0x3}, + 62: {compactTag: 0x64, symIndex: 0x42, system: 0x4}, + 63: {compactTag: 0x7c, symIndex: 0x50, system: 0x37}, + 64: {compactTag: 0x7c, 
symIndex: 0x0, system: 0x0}, + 65: {compactTag: 0x114, symIndex: 0x43, system: 0x4}, + 66: {compactTag: 0x114, symIndex: 0x18, system: 0x0}, + 67: {compactTag: 0x114, symIndex: 0x3b, system: 0x3}, + 68: {compactTag: 0x123, symIndex: 0x1, system: 0x0}, + 69: {compactTag: 0x123, symIndex: 0x3c, system: 0x3}, + 70: {compactTag: 0x123, symIndex: 0x44, system: 0x4}, + 71: {compactTag: 0x158, symIndex: 0x0, system: 0x0}, + 72: {compactTag: 0x158, symIndex: 0x3b, system: 0x3}, + 73: {compactTag: 0x158, symIndex: 0x45, system: 0x4}, + 74: {compactTag: 0x160, symIndex: 0x0, system: 0x0}, + 75: {compactTag: 0x160, symIndex: 0x38, system: 0x3}, + 76: {compactTag: 0x16d, symIndex: 0x1b, system: 0x0}, + 77: {compactTag: 0x16d, symIndex: 0x0, system: 0x9}, + 78: {compactTag: 0x16d, symIndex: 0x0, system: 0xa}, + 79: {compactTag: 0x17c, symIndex: 0x0, system: 0x0}, + 80: {compactTag: 0x17c, symIndex: 0x3d, system: 0x3}, + 81: {compactTag: 0x17c, symIndex: 0x42, system: 0x4}, + 82: {compactTag: 0x182, symIndex: 0x6, system: 0x0}, + 83: {compactTag: 0x182, symIndex: 0x38, system: 0x3}, + 84: {compactTag: 0x1b1, symIndex: 0x0, system: 0x0}, + 85: {compactTag: 0x1b1, symIndex: 0x3e, system: 0x3}, + 86: {compactTag: 0x1b6, symIndex: 0x42, system: 0x4}, + 87: {compactTag: 0x1b6, symIndex: 0x1b, system: 0x0}, + 88: {compactTag: 0x1d2, symIndex: 0x42, system: 0x4}, + 89: {compactTag: 0x1d2, symIndex: 0x0, system: 0x0}, + 90: {compactTag: 0x1f3, symIndex: 0x0, system: 0xb}, + 91: {compactTag: 0x1fd, symIndex: 0x4e, system: 0x24}, + 92: {compactTag: 0x1fd, symIndex: 0x26, system: 0x0}, + 93: {compactTag: 0x1ff, symIndex: 0x42, system: 0x4}, + 94: {compactTag: 0x204, symIndex: 0x15, system: 0x0}, + 95: {compactTag: 0x204, symIndex: 0x3f, system: 0x3}, + 96: {compactTag: 0x204, symIndex: 0x46, system: 0x4}, + 97: {compactTag: 0x20c, symIndex: 0x0, system: 0xb}, + 98: {compactTag: 0x20f, symIndex: 0x6, system: 0x0}, + 99: {compactTag: 0x20f, symIndex: 0x38, system: 0x3}, + 100: {compactTag: 
0x20f, symIndex: 0x42, system: 0x4}, + 101: {compactTag: 0x22e, symIndex: 0x0, system: 0x0}, + 102: {compactTag: 0x22e, symIndex: 0x47, system: 0x4}, + 103: {compactTag: 0x22f, symIndex: 0x42, system: 0x4}, + 104: {compactTag: 0x22f, symIndex: 0x1b, system: 0x0}, + 105: {compactTag: 0x238, symIndex: 0x42, system: 0x4}, + 106: {compactTag: 0x238, symIndex: 0x28, system: 0x0}, + 107: {compactTag: 0x265, symIndex: 0x38, system: 0x3}, + 108: {compactTag: 0x265, symIndex: 0x0, system: 0x0}, + 109: {compactTag: 0x29d, symIndex: 0x22, system: 0x0}, + 110: {compactTag: 0x29d, symIndex: 0x40, system: 0x3}, + 111: {compactTag: 0x29d, symIndex: 0x48, system: 0x4}, + 112: {compactTag: 0x29d, symIndex: 0x4d, system: 0xc}, + 113: {compactTag: 0x2bd, symIndex: 0x31, system: 0x0}, + 114: {compactTag: 0x2bd, symIndex: 0x3e, system: 0x3}, + 115: {compactTag: 0x2bd, symIndex: 0x42, system: 0x4}, + 116: {compactTag: 0x2cd, symIndex: 0x1b, system: 0x0}, + 117: {compactTag: 0x2cd, symIndex: 0x49, system: 0x4}, + 118: {compactTag: 0x2ce, symIndex: 0x49, system: 0x4}, + 119: {compactTag: 0x2d0, symIndex: 0x33, system: 0x0}, + 120: {compactTag: 0x2d0, symIndex: 0x4a, system: 0x4}, + 121: {compactTag: 0x2d1, symIndex: 0x42, system: 0x4}, + 122: {compactTag: 0x2d1, symIndex: 0x28, system: 0x0}, + 123: {compactTag: 0x2d3, symIndex: 0x34, system: 0x0}, + 124: {compactTag: 0x2d3, symIndex: 0x4b, system: 0x4}, + 125: {compactTag: 0x2f9, symIndex: 0x0, system: 0x0}, + 126: {compactTag: 0x2f9, symIndex: 0x38, system: 0x3}, + 127: {compactTag: 0x2f9, symIndex: 0x42, system: 0x4}, + 128: {compactTag: 0x2ff, symIndex: 0x36, system: 0x0}, + 129: {compactTag: 0x2ff, symIndex: 0x41, system: 0x3}, + 130: {compactTag: 0x2ff, symIndex: 0x4c, system: 0x4}, +} // Size: 810 bytes + +var tagToDecimal = []uint8{ // 775 elements + // Entry 0 - 3F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 40 - 7F + 0x05, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, + // Entry 80 - BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry C0 - FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 100 - 13F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 140 - 17F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, + 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 180 - 1BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, 0x05, 0x05, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 1C0 - 1FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, 0x05, + 0x01, 0x01, 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 200 - 23F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x05, 0x01, + 0x01, 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 240 - 27F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 
0x01, 0x01, 0x01, 0x01, + // Entry 280 - 2BF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x05, + 0x05, 0x05, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 2C0 - 2FF + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + // Entry 300 - 33F + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x08, +} // Size: 799 bytes + +var tagToScientific = []uint8{ // 775 elements + // Entry 0 - 3F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 40 - 7F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 80 - BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 
0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry C0 - FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 100 - 13F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 140 - 17F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, + 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 180 - 1BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 1C0 - 1FF + 0x02, 0x02, 0x02, 0x02, 0x02, 
0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 200 - 23F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x0c, 0x02, + 0x02, 0x0c, 0x0c, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 240 - 27F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x0d, 0x0d, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 280 - 2BF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 2C0 - 2FF + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 
0x02, + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, + // Entry 300 - 33F + 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x09, +} // Size: 799 bytes + +var tagToPercent = []uint8{ // 775 elements + // Entry 0 - 3F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 40 - 7F + 0x06, 0x06, 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, + 0x06, 0x06, 0x03, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, 0x03, + 0x03, 0x04, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x07, 0x07, 0x04, 0x04, + // Entry 80 - BF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, 0x03, 0x04, + 0x04, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry C0 - FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + // Entry 100 - 13F + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, + 0x0b, 0x0b, 
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x04, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + // Entry 140 - 17F + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, + 0x06, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 180 - 1BF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x06, 0x06, 0x06, 0x06, + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, 0x04, + // Entry 1C0 - 1FF + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 200 - 23F + 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x06, 0x06, 0x04, 0x04, 0x04, 0x06, 0x04, + 0x04, 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 240 - 27F + 0x04, 
0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, + 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, + 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x03, 0x03, 0x03, 0x03, 0x04, 0x04, + // Entry 280 - 2BF + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, 0x03, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x06, + 0x06, 0x06, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, 0x04, 0x0e, + // Entry 2C0 - 2FF + 0x0e, 0x0e, 0x04, 0x03, 0x03, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x03, + 0x03, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, + // Entry 300 - 33F + 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x0a, +} // Size: 799 bytes + +var formats = []Pattern{Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x0, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 3, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + 
MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x9, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x1, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x1}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x3, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x03\u00a0%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x7, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x6, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 3, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xc, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: 
RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x9, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x00\x03\u00a0%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xa, + GroupingSize: [2]uint8{0x3, + 0x2}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 6, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x8, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 6, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x6, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x3}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0xd, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x4}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + 
MinExponentDigits: 0x0}, + Affix: "\x00\x01%", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x2, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x03%\u00a0\x00", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x7, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x1, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x1}, + Affix: "\x01[\x01]", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x5, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x0, + MinIntegerDigits: 0x0, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x1, + GroupingSize: [2]uint8{0x0, + 0x0}, + Flags: 0x0}, + Pattern{RoundingContext: RoundingContext{MaxSignificantDigits: 0, + MaxFractionDigits: 0, + Increment: 0x0, + IncrementScale: 0x0, + Mode: 0x0, + DigitShift: 0x2, + MinIntegerDigits: 0x1, + MaxIntegerDigits: 0x0, + MinFractionDigits: 0x0, + MinSignificantDigits: 0x0, + MinExponentDigits: 0x0}, + Affix: "\x01%\x00", + Offset: 0x0, + NegOffset: 0x0, + PadRune: 0, + FormatWidth: 0x6, + GroupingSize: [2]uint8{0x3, + 0x0}, + Flags: 0x0}} + +// Total table size 8634 bytes (8KiB); checksum: 8F23386D diff --git 
a/vendor/golang.org/x/text/internal/stringset/set.go b/vendor/golang.org/x/text/internal/stringset/set.go new file mode 100644 index 00000000..bb2fffbc --- /dev/null +++ b/vendor/golang.org/x/text/internal/stringset/set.go @@ -0,0 +1,86 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package stringset provides a way to represent a collection of strings +// compactly. +package stringset + +import "sort" + +// A Set holds a collection of strings that can be looked up by an index number. +type Set struct { + // These fields are exported to allow for code generation. + + Data string + Index []uint16 +} + +// Elem returns the string with index i. It panics if i is out of range. +func (s *Set) Elem(i int) string { + return s.Data[s.Index[i]:s.Index[i+1]] +} + +// Len returns the number of strings in the set. +func (s *Set) Len() int { + return len(s.Index) - 1 +} + +// Search returns the index of the given string or -1 if it is not in the set. +// The Set must have been created with strings in sorted order. +func Search(s *Set, str string) int { + // TODO: optimize this if it gets used a lot. + n := len(s.Index) - 1 + p := sort.Search(n, func(i int) bool { + return s.Elem(i) >= str + }) + if p == n || str != s.Elem(p) { + return -1 + } + return p +} + +// A Builder constructs Sets. +type Builder struct { + set Set + index map[string]int +} + +// NewBuilder returns a new and initialized Builder. +func NewBuilder() *Builder { + return &Builder{ + set: Set{ + Index: []uint16{0}, + }, + index: map[string]int{}, + } +} + +// Set creates the set created so far. +func (b *Builder) Set() Set { + return b.set +} + +// Index returns the index for the given string, which must have been added +// before. +func (b *Builder) Index(s string) int { + return b.index[s] +} + +// Add adds a string to the index. 
Strings that are added by a single Add will +// be stored together, unless they match an existing string. +func (b *Builder) Add(ss ...string) { + // First check if the string already exists. + for _, s := range ss { + if _, ok := b.index[s]; ok { + continue + } + b.index[s] = len(b.set.Index) - 1 + b.set.Data += s + x := len(b.set.Data) + if x > 0xFFFF { + panic("Index too > 0xFFFF") + } + b.set.Index = append(b.set.Index, uint16(x)) + } +} diff --git a/vendor/golang.org/x/text/message/catalog.go b/vendor/golang.org/x/text/message/catalog.go new file mode 100644 index 00000000..068271de --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog.go @@ -0,0 +1,36 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message + +// TODO: some types in this file will need to be made public at some time. +// Documentation and method names will reflect this by using the exported name. + +import ( + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// MatchLanguage reports the matched tag obtained from language.MatchStrings for +// the Matcher of the DefaultCatalog. +func MatchLanguage(preferred ...string) language.Tag { + c := DefaultCatalog + tag, _ := language.MatchStrings(c.Matcher(), preferred...) + return tag +} + +// DefaultCatalog is used by SetString. +var DefaultCatalog catalog.Catalog = defaultCatalog + +var defaultCatalog = catalog.NewBuilder() + +// SetString calls SetString on the initial default Catalog. +func SetString(tag language.Tag, key string, msg string) error { + return defaultCatalog.SetString(tag, key, msg) +} + +// Set calls Set on the initial default Catalog. +func Set(tag language.Tag, key string, msg ...catalog.Message) error { + return defaultCatalog.Set(tag, key, msg...) 
+} diff --git a/vendor/golang.org/x/text/message/catalog/catalog.go b/vendor/golang.org/x/text/message/catalog/catalog.go new file mode 100644 index 00000000..d06ba2f0 --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/catalog.go @@ -0,0 +1,365 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package catalog defines collections of translated format strings. +// +// This package mostly defines types for populating catalogs with messages. The +// catmsg package contains further definitions for creating custom message and +// dictionary types as well as packages that use Catalogs. +// +// Package catalog defines various interfaces: Dictionary, Loader, and Message. +// A Dictionary maintains a set of translations of format strings for a single +// language. The Loader interface defines a source of dictionaries. A +// translation of a format string is represented by a Message. +// +// # Catalogs +// +// A Catalog defines a programmatic interface for setting message translations. +// It maintains a set of per-language dictionaries with translations for a set +// of keys. For message translation to function properly, a translation should +// be defined for each key for each supported language. A dictionary may be +// underspecified, though, if there is a parent language that already defines +// the key. For example, a Dictionary for "en-GB" could leave out entries that +// are identical to those in a dictionary for "en". +// +// # Messages +// +// A Message is a format string which varies on the value of substitution +// variables. For instance, to indicate the number of results one could want "no +// results" if there are none, "1 result" if there is 1, and "%d results" for +// any other number. 
Catalog is agnostic to the kind of format strings that are +// used: for instance, messages can follow either the printf-style substitution +// from package fmt or use templates. +// +// A Message does not substitute arguments in the format string. This job is +// reserved for packages that render strings, such as message, that use Catalogs +// to selected string. This separation of concerns allows Catalog to be used to +// store any kind of formatting strings. +// +// # Selecting messages based on linguistic features of substitution arguments +// +// Messages may vary based on any linguistic features of the argument values. +// The most common one is plural form, but others exist. +// +// Selection messages are provided in packages that provide support for a +// specific linguistic feature. The following snippet uses plural.Selectf: +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// plural.Selectf(1, "", +// plural.One, "You are 1 minute late.", +// plural.Other, "You are %d minutes late.")) +// +// In this example, a message is stored in the Catalog where one of two messages +// is selected based on the first argument, a number. The first message is +// selected if the argument is singular (identified by the selector "one") and +// the second message is selected in all other cases. The selectors are defined +// by the plural rules defined in CLDR. The selector "other" is special and will +// always match. Each language always defines one of the linguistic categories +// to be "other." For English, singular is "one" and plural is "other". +// +// Selects can be nested. This allows selecting sentences based on features of +// multiple arguments or multiple linguistic properties of a single argument. +// +// # String interpolation +// +// There is often a lot of commonality between the possible variants of a +// message. 
For instance, in the example above the word "minute" varies based on +// the plural catogory of the argument, but the rest of the sentence is +// identical. Using interpolation the above message can be rewritten as: +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", +// plural.Selectf(1, "", plural.One, "minute", plural.Other, "minutes")), +// catalog.String("You are %[1]d ${minutes} late.")) +// +// Var is defined to return the variable name if the message does not yield a +// match. This allows us to further simplify this snippet to +// +// catalog.Set(language.English, "You are %d minute(s) late.", +// catalog.Var("minutes", plural.Selectf(1, "", plural.One, "minute")), +// catalog.String("You are %d ${minutes} late.")) +// +// Overall this is still only a minor improvement, but things can get a lot more +// unwieldy if more than one linguistic feature is used to determine a message +// variant. Consider the following example: +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.", +// catalog.Var("their", +// plural.Selectf(1, "" +// plural.One, gender.Select(1, "female", "her", "other", "his"))), +// catalog.Var("invites", plural.Selectf(1, "", plural.One, "invite")) +// catalog.String("%[1]v ${invites} %[2]v to ${their} party.")), +// +// Without variable substitution, this would have to be written as +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.Set(language.English, "%[1]v invite(s) %[2]v to their party.", +// plural.Selectf(1, "", +// plural.One, gender.Select(1, +// "female", "%[1]v invites %[2]v to her party." +// "other", "%[1]v invites %[2]v to his party."), +// plural.Other, "%[1]v invites %[2]v to their party.")) +// +// Not necessarily shorter, but using variables there is less duplication and +// the messages are more maintenance friendly. 
Moreover, languages may have up +// to six plural forms. This makes the use of variables more welcome. +// +// Different messages using the same inflections can reuse variables by moving +// them to macros. Using macros we can rewrite the message as: +// +// // argument 1: list of hosts, argument 2: list of guests +// catalog.SetString(language.English, "%[1]v invite(s) %[2]v to their party.", +// "%[1]v ${invites(1)} %[2]v to ${their(1)} party.") +// +// Where the following macros were defined separately. +// +// catalog.SetMacro(language.English, "invites", plural.Selectf(1, "", +// plural.One, "invite")) +// catalog.SetMacro(language.English, "their", plural.Selectf(1, "", +// plural.One, gender.Select(1, "female", "her", "other", "his"))), +// +// Placeholders use parentheses and the arguments to invoke a macro. +// +// # Looking up messages +// +// Message lookup using Catalogs is typically only done by specialized packages +// and is not something the user should be concerned with. For instance, to +// express the tardiness of a user using the related message we defined earlier, +// the user may use the package message like so: +// +// p := message.NewPrinter(language.English) +// p.Printf("You are %d minute(s) late.", 5) +// +// Which would print: +// +// You are 5 minutes late. +// +// This package is UNDER CONSTRUCTION and its API may change. +package catalog // import "golang.org/x/text/message/catalog" + +// TODO: +// Some way to freeze a catalog. +// - Locking on each lockup turns out to be about 50% of the total running time +// for some of the benchmarks in the message package. +// Consider these: +// - Sequence type to support sequences in user-defined messages. +// - Garbage collection: Remove dictionaries that can no longer be reached +// as other dictionaries have been added that cover all possible keys. 
+ +import ( + "errors" + "fmt" + + "golang.org/x/text/internal" + + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/language" +) + +// A Catalog allows lookup of translated messages. +type Catalog interface { + // Languages returns all languages for which the Catalog contains variants. + Languages() []language.Tag + + // Matcher returns a Matcher for languages from this Catalog. + Matcher() language.Matcher + + // A Context is used for evaluating Messages. + Context(tag language.Tag, r catmsg.Renderer) *Context + + // This method also makes Catalog a private interface. + lookup(tag language.Tag, key string) (data string, ok bool) +} + +// NewFromMap creates a Catalog from the given map. If a Dictionary is +// underspecified the entry is retrieved from a parent language. +func NewFromMap(dictionaries map[string]Dictionary, opts ...Option) (Catalog, error) { + options := options{} + for _, o := range opts { + o(&options) + } + c := &catalog{ + dicts: map[language.Tag]Dictionary{}, + } + _, hasFallback := dictionaries[options.fallback.String()] + if hasFallback { + // TODO: Should it be okay to not have a fallback language? + // Catalog generators could enforce there is always a fallback. + c.langs = append(c.langs, options.fallback) + } + for lang, dict := range dictionaries { + tag, err := language.Parse(lang) + if err != nil { + return nil, fmt.Errorf("catalog: invalid language tag %q", lang) + } + if _, ok := c.dicts[tag]; ok { + return nil, fmt.Errorf("catalog: duplicate entry for tag %q after normalization", tag) + } + c.dicts[tag] = dict + if !hasFallback || tag != options.fallback { + c.langs = append(c.langs, tag) + } + } + if hasFallback { + internal.SortTags(c.langs[1:]) + } else { + internal.SortTags(c.langs) + } + c.matcher = language.NewMatcher(c.langs) + return c, nil +} + +// A Dictionary is a source of translations for a single language. +type Dictionary interface { + // Lookup returns a message compiled with catmsg.Compile for the given key. 
+ // It returns false for ok if such a message could not be found. + Lookup(key string) (data string, ok bool) +} + +type catalog struct { + langs []language.Tag + dicts map[language.Tag]Dictionary + macros store + matcher language.Matcher +} + +func (c *catalog) Languages() []language.Tag { return c.langs } +func (c *catalog) Matcher() language.Matcher { return c.matcher } + +func (c *catalog) lookup(tag language.Tag, key string) (data string, ok bool) { + for ; ; tag = tag.Parent() { + if dict, ok := c.dicts[tag]; ok { + if data, ok := dict.Lookup(key); ok { + return data, true + } + } + if tag == language.Und { + break + } + } + return "", false +} + +// Context returns a Context for formatting messages. +// Only one Message may be formatted per context at any given time. +func (c *catalog) Context(tag language.Tag, r catmsg.Renderer) *Context { + return &Context{ + cat: c, + tag: tag, + dec: catmsg.NewDecoder(tag, r, &dict{&c.macros, tag}), + } +} + +// A Builder allows building a Catalog programmatically. +type Builder struct { + options + matcher language.Matcher + + index store + macros store +} + +type options struct { + fallback language.Tag +} + +// An Option configures Catalog behavior. +type Option func(*options) + +// Fallback specifies the default fallback language. The default is Und. +func Fallback(tag language.Tag) Option { + return func(o *options) { o.fallback = tag } +} + +// TODO: +// // Catalogs specifies one or more sources for a Catalog. +// // Lookups are in order. +// // This can be changed inserting a Catalog used for setting, which implements +// // Loader, used for setting in the chain. +// func Catalogs(d ...Loader) Option { +// return nil +// } +// +// func Delims(start, end string) Option {} +// +// func Dict(tag language.Tag, d ...Dictionary) Option + +// NewBuilder returns an empty mutable Catalog. 
+func NewBuilder(opts ...Option) *Builder { + c := &Builder{} + for _, o := range opts { + o(&c.options) + } + return c +} + +// SetString is shorthand for Set(tag, key, String(msg)). +func (c *Builder) SetString(tag language.Tag, key string, msg string) error { + return c.set(tag, key, &c.index, String(msg)) +} + +// Set sets the translation for the given language and key. +// +// When evaluation this message, the first Message in the sequence to msgs to +// evaluate to a string will be the message returned. +func (c *Builder) Set(tag language.Tag, key string, msg ...Message) error { + return c.set(tag, key, &c.index, msg...) +} + +// SetMacro defines a Message that may be substituted in another message. +// The arguments to a macro Message are passed as arguments in the +// placeholder the form "${foo(arg1, arg2)}". +func (c *Builder) SetMacro(tag language.Tag, name string, msg ...Message) error { + return c.set(tag, name, &c.macros, msg...) +} + +// ErrNotFound indicates there was no message for the given key. +var ErrNotFound = errors.New("catalog: message not found") + +// String specifies a plain message string. It can be used as fallback if no +// other strings match or as a simple standalone message. +// +// It is an error to pass more than one String in a message sequence. +func String(name string) Message { + return catmsg.String(name) +} + +// Var sets a variable that may be substituted in formatting patterns using +// named substitution of the form "${name}". The name argument is used as a +// fallback if the statements do not produce a match. The statement sequence may +// not contain any Var calls. +// +// The name passed to a Var must be unique within message sequence. +func Var(name string, msg ...Message) Message { + return &catmsg.Var{Name: name, Message: catmsg.FirstOf(msg)} +} + +// Context returns a Context for formatting messages. +// Only one Message may be formatted per context at any given time. 
+func (b *Builder) Context(tag language.Tag, r catmsg.Renderer) *Context { + return &Context{ + cat: b, + tag: tag, + dec: catmsg.NewDecoder(tag, r, &dict{&b.macros, tag}), + } +} + +// A Context is used for evaluating Messages. +// Only one Message may be formatted per context at any given time. +type Context struct { + cat Catalog + tag language.Tag // TODO: use compact index. + dec *catmsg.Decoder +} + +// Execute looks up and executes the message with the given key. +// It returns ErrNotFound if no message could be found in the index. +func (c *Context) Execute(key string) error { + data, ok := c.cat.lookup(c.tag, key) + if !ok { + return ErrNotFound + } + return c.dec.Execute(data) +} diff --git a/vendor/golang.org/x/text/message/catalog/dict.go b/vendor/golang.org/x/text/message/catalog/dict.go new file mode 100644 index 00000000..1416e7b0 --- /dev/null +++ b/vendor/golang.org/x/text/message/catalog/dict.go @@ -0,0 +1,133 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package catalog + +import ( + "sync" + + "golang.org/x/text/internal" + "golang.org/x/text/internal/catmsg" + "golang.org/x/text/language" +) + +// A Message holds a collection of translations for the same phrase that may +// vary based on the values of substitution arguments. +type Message = catmsg.Message + +// TODO: +// Dictionary returns a Dictionary that returns the first Message, using the +// given language tag, that matches: +// 1. the last one registered by one of the Set methods +// 2. returned by one of the Loaders +// 3. repeat from 1. using the parent language +// This approach allows messages to be underspecified. +// func (c *Catalog) Dictionary(tag language.Tag) (Dictionary, error) { +// // TODO: verify dictionary exists. +// return &dict{&c.index, tag}, nil +// } + +type dict struct { + s *store + tag language.Tag // TODO: make compact tag. 
+} + +func (d *dict) Lookup(key string) (data string, ok bool) { + return d.s.lookup(d.tag, key) +} + +func (b *Builder) lookup(tag language.Tag, key string) (data string, ok bool) { + return b.index.lookup(tag, key) +} + +func (c *Builder) set(tag language.Tag, key string, s *store, msg ...Message) error { + data, err := catmsg.Compile(tag, &dict{&c.macros, tag}, catmsg.FirstOf(msg)) + + s.mutex.Lock() + defer s.mutex.Unlock() + + m := s.index[tag] + if m == nil { + m = msgMap{} + if s.index == nil { + s.index = map[language.Tag]msgMap{} + } + c.matcher = nil + s.index[tag] = m + } + + m[key] = data + return err +} + +func (c *Builder) Matcher() language.Matcher { + c.index.mutex.RLock() + m := c.matcher + c.index.mutex.RUnlock() + if m != nil { + return m + } + + c.index.mutex.Lock() + if c.matcher == nil { + c.matcher = language.NewMatcher(c.unlockedLanguages()) + } + m = c.matcher + c.index.mutex.Unlock() + return m +} + +type store struct { + mutex sync.RWMutex + index map[language.Tag]msgMap +} + +type msgMap map[string]string + +func (s *store) lookup(tag language.Tag, key string) (data string, ok bool) { + s.mutex.RLock() + defer s.mutex.RUnlock() + + for ; ; tag = tag.Parent() { + if msgs, ok := s.index[tag]; ok { + if msg, ok := msgs[key]; ok { + return msg, true + } + } + if tag == language.Und { + break + } + } + return "", false +} + +// Languages returns all languages for which the Catalog contains variants. 
+func (b *Builder) Languages() []language.Tag { + s := &b.index + s.mutex.RLock() + defer s.mutex.RUnlock() + + return b.unlockedLanguages() +} + +func (b *Builder) unlockedLanguages() []language.Tag { + s := &b.index + if len(s.index) == 0 { + return nil + } + tags := make([]language.Tag, 0, len(s.index)) + _, hasFallback := s.index[b.options.fallback] + offset := 0 + if hasFallback { + tags = append(tags, b.options.fallback) + offset = 1 + } + for t := range s.index { + if t != b.options.fallback { + tags = append(tags, t) + } + } + internal.SortTags(tags[offset:]) + return tags +} diff --git a/vendor/golang.org/x/text/message/doc.go b/vendor/golang.org/x/text/message/doc.go new file mode 100644 index 00000000..4bf7bdca --- /dev/null +++ b/vendor/golang.org/x/text/message/doc.go @@ -0,0 +1,99 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package message implements formatted I/O for localized strings with functions +// analogous to the fmt's print functions. It is a drop-in replacement for fmt. +// +// # Localized Formatting +// +// A format string can be localized by replacing any of the print functions of +// fmt with an equivalent call to a Printer. +// +// p := message.NewPrinter(message.MatchLanguage("en")) +// p.Println(123456.78) // Prints 123,456.78 +// +// p.Printf("%d ducks in a row", 4331) // Prints 4,331 ducks in a row +// +// p := message.NewPrinter(message.MatchLanguage("nl")) +// p.Printf("Hoogte: %.1f meter", 1244.9) // Prints Hoogte: 1,244.9 meter +// +// p := message.NewPrinter(message.MatchLanguage("bn")) +// p.Println(123456.78) // Prints ১,২৩,৪৫৬.৭৮ +// +// Printer currently supports numbers and specialized types for which packages +// exist in x/text. Other builtin types such as time.Time and slices are +// planned. 
+// +// Format strings largely have the same meaning as with fmt with the following +// notable exceptions: +// - flag # always resorts to fmt for printing +// - verb 'f', 'e', 'g', 'd' use localized formatting unless the '#' flag is +// specified. +// - verb 'm' inserts a translation of a string argument. +// +// See package fmt for more options. +// +// # Translation +// +// The format strings that are passed to Printf, Sprintf, Fprintf, or Errorf +// are used as keys to look up translations for the specified languages. +// More on how these need to be specified below. +// +// One can use arbitrary keys to distinguish between otherwise ambiguous +// strings: +// +// p := message.NewPrinter(language.English) +// p.Printf("archive(noun)") // Prints "archive" +// p.Printf("archive(verb)") // Prints "archive" +// +// p := message.NewPrinter(language.German) +// p.Printf("archive(noun)") // Prints "Archiv" +// p.Printf("archive(verb)") // Prints "archivieren" +// +// To retain the fallback functionality, use Key: +// +// p.Printf(message.Key("archive(noun)", "archive")) +// p.Printf(message.Key("archive(verb)", "archive")) +// +// # Translation Pipeline +// +// Format strings that contain text need to be translated to support different +// locales. The first step is to extract strings that need to be translated. +// +// 1. Install gotext +// +// go get -u golang.org/x/text/cmd/gotext +// gotext -help +// +// 2. Mark strings in your source to be translated by using message.Printer, +// instead of the functions of the fmt package. +// +// 3. Extract the strings from your source +// +// gotext extract +// +// The output will be written to the textdata directory. +// +// 4. Send the files for translation +// +// It is planned to support multiple formats, but for now one will have to +// rewrite the JSON output to the desired format. +// +// 5. Inject translations into program +// +// 6. 
Repeat from 2 +// +// Right now this has to be done programmatically with calls to Set or +// SetString. These functions as well as the methods defined in +// see also package golang.org/x/text/message/catalog can be used to implement +// either dynamic or static loading of messages. +// +// # Plural and Gender Forms +// +// Translated messages can vary based on the plural and gender forms of +// substitution values. In general, it is up to the translators to provide +// alternative translations for such forms. See the packages in +// golang.org/x/text/feature and golang.org/x/text/message/catalog for more +// information. +package message diff --git a/vendor/golang.org/x/text/message/format.go b/vendor/golang.org/x/text/message/format.go new file mode 100644 index 00000000..a47d17dd --- /dev/null +++ b/vendor/golang.org/x/text/message/format.go @@ -0,0 +1,510 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message + +import ( + "bytes" + "strconv" + "unicode/utf8" + + "golang.org/x/text/internal/format" +) + +const ( + ldigits = "0123456789abcdefx" + udigits = "0123456789ABCDEFX" +) + +const ( + signed = true + unsigned = false +) + +// A formatInfo is the raw formatter used by Printf etc. +// It prints into a buffer that must be set up separately. +type formatInfo struct { + buf *bytes.Buffer + + format.Parser + + // intbuf is large enough to store %b of an int64 with a sign and + // avoids padding at the end of the struct on 32 bit architectures. + intbuf [68]byte +} + +func (f *formatInfo) init(buf *bytes.Buffer) { + f.ClearFlags() + f.buf = buf +} + +// writePadding generates n bytes of padding. +func (f *formatInfo) writePadding(n int) { + if n <= 0 { // No padding bytes needed. + return + } + f.buf.Grow(n) + // Decide which byte the padding should be filled with. 
+ padByte := byte(' ') + if f.Zero { + padByte = byte('0') + } + // Fill padding with padByte. + for i := 0; i < n; i++ { + f.buf.WriteByte(padByte) // TODO: make more efficient. + } +} + +// pad appends b to f.buf, padded on left (!f.minus) or right (f.minus). +func (f *formatInfo) pad(b []byte) { + if !f.WidthPresent || f.Width == 0 { + f.buf.Write(b) + return + } + width := f.Width - utf8.RuneCount(b) + if !f.Minus { + // left padding + f.writePadding(width) + f.buf.Write(b) + } else { + // right padding + f.buf.Write(b) + f.writePadding(width) + } +} + +// padString appends s to f.buf, padded on left (!f.minus) or right (f.minus). +func (f *formatInfo) padString(s string) { + if !f.WidthPresent || f.Width == 0 { + f.buf.WriteString(s) + return + } + width := f.Width - utf8.RuneCountInString(s) + if !f.Minus { + // left padding + f.writePadding(width) + f.buf.WriteString(s) + } else { + // right padding + f.buf.WriteString(s) + f.writePadding(width) + } +} + +// fmt_boolean formats a boolean. +func (f *formatInfo) fmt_boolean(v bool) { + if v { + f.padString("true") + } else { + f.padString("false") + } +} + +// fmt_unicode formats a uint64 as "U+0078" or with f.sharp set as "U+0078 'x'". +func (f *formatInfo) fmt_unicode(u uint64) { + buf := f.intbuf[0:] + + // With default precision set the maximum needed buf length is 18 + // for formatting -1 with %#U ("U+FFFFFFFFFFFFFFFF") which fits + // into the already allocated intbuf with a capacity of 68 bytes. + prec := 4 + if f.PrecPresent && f.Prec > 4 { + prec = f.Prec + // Compute space needed for "U+" , number, " '", character, "'". + width := 2 + prec + 2 + utf8.UTFMax + 1 + if width > len(buf) { + buf = make([]byte, width) + } + } + + // Format into buf, ending at buf[i]. Formatting numbers is easier right-to-left. + i := len(buf) + + // For %#U we want to add a space and a quoted character at the end of the buffer. 
+ if f.Sharp && u <= utf8.MaxRune && strconv.IsPrint(rune(u)) { + i-- + buf[i] = '\'' + i -= utf8.RuneLen(rune(u)) + utf8.EncodeRune(buf[i:], rune(u)) + i-- + buf[i] = '\'' + i-- + buf[i] = ' ' + } + // Format the Unicode code point u as a hexadecimal number. + for u >= 16 { + i-- + buf[i] = udigits[u&0xF] + prec-- + u >>= 4 + } + i-- + buf[i] = udigits[u] + prec-- + // Add zeros in front of the number until requested precision is reached. + for prec > 0 { + i-- + buf[i] = '0' + prec-- + } + // Add a leading "U+". + i-- + buf[i] = '+' + i-- + buf[i] = 'U' + + oldZero := f.Zero + f.Zero = false + f.pad(buf[i:]) + f.Zero = oldZero +} + +// fmt_integer formats signed and unsigned integers. +func (f *formatInfo) fmt_integer(u uint64, base int, isSigned bool, digits string) { + negative := isSigned && int64(u) < 0 + if negative { + u = -u + } + + buf := f.intbuf[0:] + // The already allocated f.intbuf with a capacity of 68 bytes + // is large enough for integer formatting when no precision or width is set. + if f.WidthPresent || f.PrecPresent { + // Account 3 extra bytes for possible addition of a sign and "0x". + width := 3 + f.Width + f.Prec // wid and prec are always positive. + if width > len(buf) { + // We're going to need a bigger boat. + buf = make([]byte, width) + } + } + + // Two ways to ask for extra leading zero digits: %.3d or %03d. + // If both are specified the f.zero flag is ignored and + // padding with spaces is used instead. + prec := 0 + if f.PrecPresent { + prec = f.Prec + // Precision of 0 and value of 0 means "print nothing" but padding. + if prec == 0 && u == 0 { + oldZero := f.Zero + f.Zero = false + f.writePadding(f.Width) + f.Zero = oldZero + return + } + } else if f.Zero && f.WidthPresent { + prec = f.Width + if negative || f.Plus || f.Space { + prec-- // leave room for sign + } + } + + // Because printing is easier right-to-left: format u into buf, ending at buf[i]. 
+ // We could make things marginally faster by splitting the 32-bit case out + // into a separate block but it's not worth the duplication, so u has 64 bits. + i := len(buf) + // Use constants for the division and modulo for more efficient code. + // Switch cases ordered by popularity. + switch base { + case 10: + for u >= 10 { + i-- + next := u / 10 + buf[i] = byte('0' + u - next*10) + u = next + } + case 16: + for u >= 16 { + i-- + buf[i] = digits[u&0xF] + u >>= 4 + } + case 8: + for u >= 8 { + i-- + buf[i] = byte('0' + u&7) + u >>= 3 + } + case 2: + for u >= 2 { + i-- + buf[i] = byte('0' + u&1) + u >>= 1 + } + default: + panic("fmt: unknown base; can't happen") + } + i-- + buf[i] = digits[u] + for i > 0 && prec > len(buf)-i { + i-- + buf[i] = '0' + } + + // Various prefixes: 0x, -, etc. + if f.Sharp { + switch base { + case 8: + if buf[i] != '0' { + i-- + buf[i] = '0' + } + case 16: + // Add a leading 0x or 0X. + i-- + buf[i] = digits[16] + i-- + buf[i] = '0' + } + } + + if negative { + i-- + buf[i] = '-' + } else if f.Plus { + i-- + buf[i] = '+' + } else if f.Space { + i-- + buf[i] = ' ' + } + + // Left padding with zeros has already been handled like precision earlier + // or the f.zero flag is ignored due to an explicitly set precision. + oldZero := f.Zero + f.Zero = false + f.pad(buf[i:]) + f.Zero = oldZero +} + +// truncate truncates the string to the specified precision, if present. +func (f *formatInfo) truncate(s string) string { + if f.PrecPresent { + n := f.Prec + for i := range s { + n-- + if n < 0 { + return s[:i] + } + } + } + return s +} + +// fmt_s formats a string. +func (f *formatInfo) fmt_s(s string) { + s = f.truncate(s) + f.padString(s) +} + +// fmt_sbx formats a string or byte slice as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_sbx(s string, b []byte, digits string) { + length := len(b) + if b == nil { + // No byte slice present. Assume string s should be encoded. 
+ length = len(s) + } + // Set length to not process more bytes than the precision demands. + if f.PrecPresent && f.Prec < length { + length = f.Prec + } + // Compute width of the encoding taking into account the f.sharp and f.space flag. + width := 2 * length + if width > 0 { + if f.Space { + // Each element encoded by two hexadecimals will get a leading 0x or 0X. + if f.Sharp { + width *= 2 + } + // Elements will be separated by a space. + width += length - 1 + } else if f.Sharp { + // Only a leading 0x or 0X will be added for the whole string. + width += 2 + } + } else { // The byte slice or string that should be encoded is empty. + if f.WidthPresent { + f.writePadding(f.Width) + } + return + } + // Handle padding to the left. + if f.WidthPresent && f.Width > width && !f.Minus { + f.writePadding(f.Width - width) + } + // Write the encoding directly into the output buffer. + buf := f.buf + if f.Sharp { + // Add leading 0x or 0X. + buf.WriteByte('0') + buf.WriteByte(digits[16]) + } + var c byte + for i := 0; i < length; i++ { + if f.Space && i > 0 { + // Separate elements with a space. + buf.WriteByte(' ') + if f.Sharp { + // Add leading 0x or 0X for each element. + buf.WriteByte('0') + buf.WriteByte(digits[16]) + } + } + if b != nil { + c = b[i] // Take a byte from the input byte slice. + } else { + c = s[i] // Take a byte from the input string. + } + // Encode each byte as two hexadecimal digits. + buf.WriteByte(digits[c>>4]) + buf.WriteByte(digits[c&0xF]) + } + // Handle padding to the right. + if f.WidthPresent && f.Width > width && f.Minus { + f.writePadding(f.Width - width) + } +} + +// fmt_sx formats a string as a hexadecimal encoding of its bytes. +func (f *formatInfo) fmt_sx(s, digits string) { + f.fmt_sbx(s, nil, digits) +} + +// fmt_bx formats a byte slice as a hexadecimal encoding of its bytes. 
+func (f *formatInfo) fmt_bx(b []byte, digits string) { + f.fmt_sbx("", b, digits) +} + +// fmt_q formats a string as a double-quoted, escaped Go string constant. +// If f.sharp is set a raw (backquoted) string may be returned instead +// if the string does not contain any control characters other than tab. +func (f *formatInfo) fmt_q(s string) { + s = f.truncate(s) + if f.Sharp && strconv.CanBackquote(s) { + f.padString("`" + s + "`") + return + } + buf := f.intbuf[:0] + if f.Plus { + f.pad(strconv.AppendQuoteToASCII(buf, s)) + } else { + f.pad(strconv.AppendQuote(buf, s)) + } +} + +// fmt_c formats an integer as a Unicode character. +// If the character is not valid Unicode, it will print '\ufffd'. +func (f *formatInfo) fmt_c(c uint64) { + r := rune(c) + if c > utf8.MaxRune { + r = utf8.RuneError + } + buf := f.intbuf[:0] + w := utf8.EncodeRune(buf[:utf8.UTFMax], r) + f.pad(buf[:w]) +} + +// fmt_qc formats an integer as a single-quoted, escaped Go character constant. +// If the character is not valid Unicode, it will print '\ufffd'. +func (f *formatInfo) fmt_qc(c uint64) { + r := rune(c) + if c > utf8.MaxRune { + r = utf8.RuneError + } + buf := f.intbuf[:0] + if f.Plus { + f.pad(strconv.AppendQuoteRuneToASCII(buf, r)) + } else { + f.pad(strconv.AppendQuoteRune(buf, r)) + } +} + +// fmt_float formats a float64. It assumes that verb is a valid format specifier +// for strconv.AppendFloat and therefore fits into a byte. +func (f *formatInfo) fmt_float(v float64, size int, verb rune, prec int) { + // Explicit precision in format specifier overrules default precision. + if f.PrecPresent { + prec = f.Prec + } + // Format number, reserving space for leading + sign if needed. + num := strconv.AppendFloat(f.intbuf[:1], v, byte(verb), prec, size) + if num[1] == '-' || num[1] == '+' { + num = num[1:] + } else { + num[0] = '+' + } + // f.space means to add a leading space instead of a "+" sign unless + // the sign is explicitly asked for by f.plus. 
+ if f.Space && num[0] == '+' && !f.Plus { + num[0] = ' ' + } + // Special handling for infinities and NaN, + // which don't look like a number so shouldn't be padded with zeros. + if num[1] == 'I' || num[1] == 'N' { + oldZero := f.Zero + f.Zero = false + // Remove sign before NaN if not asked for. + if num[1] == 'N' && !f.Space && !f.Plus { + num = num[1:] + } + f.pad(num) + f.Zero = oldZero + return + } + // The sharp flag forces printing a decimal point for non-binary formats + // and retains trailing zeros, which we may need to restore. + if f.Sharp && verb != 'b' { + digits := 0 + switch verb { + case 'v', 'g', 'G': + digits = prec + // If no precision is set explicitly use a precision of 6. + if digits == -1 { + digits = 6 + } + } + + // Buffer pre-allocated with enough room for + // exponent notations of the form "e+123". + var tailBuf [5]byte + tail := tailBuf[:0] + + hasDecimalPoint := false + // Starting from i = 1 to skip sign at num[0]. + for i := 1; i < len(num); i++ { + switch num[i] { + case '.': + hasDecimalPoint = true + case 'e', 'E': + tail = append(tail, num[i:]...) + num = num[:i] + default: + digits-- + } + } + if !hasDecimalPoint { + num = append(num, '.') + } + for digits > 0 { + num = append(num, '0') + digits-- + } + num = append(num, tail...) + } + // We want a sign if asked for and if the sign is not positive. + if f.Plus || num[0] != '+' { + // If we're zero padding to the left we want the sign before the leading zeros. + // Achieve this by writing the sign out and then padding the unsigned number. + if f.Zero && f.WidthPresent && f.Width > len(num) { + f.buf.WriteByte(num[0]) + f.writePadding(f.Width - len(num)) + f.buf.Write(num[1:]) + return + } + f.pad(num) + return + } + // No sign to show and the number is positive; just print the unsigned number. 
+ f.pad(num[1:]) +} diff --git a/vendor/golang.org/x/text/message/message.go b/vendor/golang.org/x/text/message/message.go new file mode 100644 index 00000000..91a97264 --- /dev/null +++ b/vendor/golang.org/x/text/message/message.go @@ -0,0 +1,192 @@ +// Copyright 2015 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package message // import "golang.org/x/text/message" + +import ( + "io" + "os" + + // Include features to facilitate generated catalogs. + _ "golang.org/x/text/feature/plural" + + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// A Printer implements language-specific formatted I/O analogous to the fmt +// package. +type Printer struct { + // the language + tag language.Tag + + toDecimal number.Formatter + toScientific number.Formatter + + cat catalog.Catalog +} + +type options struct { + cat catalog.Catalog + // TODO: + // - allow %s to print integers in written form (tables are likely too large + // to enable this by default). + // - list behavior + // +} + +// An Option defines an option of a Printer. +type Option func(o *options) + +// Catalog defines the catalog to be used. +func Catalog(c catalog.Catalog) Option { + return func(o *options) { o.cat = c } +} + +// NewPrinter returns a Printer that formats messages tailored to language t. +func NewPrinter(t language.Tag, opts ...Option) *Printer { + options := &options{ + cat: DefaultCatalog, + } + for _, o := range opts { + o(options) + } + p := &Printer{ + tag: t, + cat: options.cat, + } + p.toDecimal.InitDecimal(t) + p.toScientific.InitScientific(t) + return p +} + +// Sprint is like fmt.Sprint, but using language-specific formatting. 
+func (p *Printer) Sprint(a ...interface{}) string { + pp := newPrinter(p) + pp.doPrint(a) + s := pp.String() + pp.free() + return s +} + +// Fprint is like fmt.Fprint, but using language-specific formatting. +func (p *Printer) Fprint(w io.Writer, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + pp.doPrint(a) + n64, err := io.Copy(w, &pp.Buffer) + pp.free() + return int(n64), err +} + +// Print is like fmt.Print, but using language-specific formatting. +func (p *Printer) Print(a ...interface{}) (n int, err error) { + return p.Fprint(os.Stdout, a...) +} + +// Sprintln is like fmt.Sprintln, but using language-specific formatting. +func (p *Printer) Sprintln(a ...interface{}) string { + pp := newPrinter(p) + pp.doPrintln(a) + s := pp.String() + pp.free() + return s +} + +// Fprintln is like fmt.Fprintln, but using language-specific formatting. +func (p *Printer) Fprintln(w io.Writer, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + pp.doPrintln(a) + n64, err := io.Copy(w, &pp.Buffer) + pp.free() + return int(n64), err +} + +// Println is like fmt.Println, but using language-specific formatting. +func (p *Printer) Println(a ...interface{}) (n int, err error) { + return p.Fprintln(os.Stdout, a...) +} + +// Sprintf is like fmt.Sprintf, but using language-specific formatting. +func (p *Printer) Sprintf(key Reference, a ...interface{}) string { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + s := pp.String() + pp.free() + return s +} + +// Fprintf is like fmt.Fprintf, but using language-specific formatting. +func (p *Printer) Fprintf(w io.Writer, key Reference, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + n, err = w.Write(pp.Bytes()) + pp.free() + return n, err + +} + +// Printf is like fmt.Printf, but using language-specific formatting. 
+func (p *Printer) Printf(key Reference, a ...interface{}) (n int, err error) { + pp := newPrinter(p) + lookupAndFormat(pp, key, a) + n, err = os.Stdout.Write(pp.Bytes()) + pp.free() + return n, err +} + +func lookupAndFormat(p *printer, r Reference, a []interface{}) { + p.fmt.Reset(a) + switch v := r.(type) { + case string: + if p.catContext.Execute(v) == catalog.ErrNotFound { + p.Render(v) + return + } + case key: + if p.catContext.Execute(v.id) == catalog.ErrNotFound && + p.catContext.Execute(v.fallback) == catalog.ErrNotFound { + p.Render(v.fallback) + return + } + default: + panic("key argument is not a Reference") + } +} + +type rawPrinter struct { + p *printer +} + +func (p rawPrinter) Render(msg string) { p.p.WriteString(msg) } +func (p rawPrinter) Arg(i int) interface{} { return nil } + +// Arg implements catmsg.Renderer. +func (p *printer) Arg(i int) interface{} { // TODO, also return "ok" bool + i-- + if uint(i) < uint(len(p.fmt.Args)) { + return p.fmt.Args[i] + } + return nil +} + +// Render implements catmsg.Renderer. +func (p *printer) Render(msg string) { + p.doPrintf(msg) +} + +// A Reference is a string or a message reference. +type Reference interface { + // TODO: also allow []string +} + +// Key creates a message Reference for a message where the given id is used for +// message lookup and the fallback is returned when no matches are found. +func Key(id string, fallback string) Reference { + return key{id, fallback} +} + +type key struct { + id, fallback string +} diff --git a/vendor/golang.org/x/text/message/print.go b/vendor/golang.org/x/text/message/print.go new file mode 100644 index 00000000..da304cc0 --- /dev/null +++ b/vendor/golang.org/x/text/message/print.go @@ -0,0 +1,984 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package message + +import ( + "bytes" + "fmt" // TODO: consider copying interfaces from package fmt to avoid dependency. + "math" + "reflect" + "sync" + "unicode/utf8" + + "golang.org/x/text/internal/format" + "golang.org/x/text/internal/number" + "golang.org/x/text/language" + "golang.org/x/text/message/catalog" +) + +// Strings for use with buffer.WriteString. +// This is less overhead than using buffer.Write with byte arrays. +const ( + commaSpaceString = ", " + nilAngleString = "" + nilParenString = "(nil)" + nilString = "nil" + mapString = "map[" + percentBangString = "%!" + missingString = "(MISSING)" + badIndexString = "(BADINDEX)" + panicString = "(PANIC=" + extraString = "%!(EXTRA " + badWidthString = "%!(BADWIDTH)" + badPrecString = "%!(BADPREC)" + noVerbString = "%!(NOVERB)" + + invReflectString = "" +) + +var printerPool = sync.Pool{ + New: func() interface{} { return new(printer) }, +} + +// newPrinter allocates a new printer struct or grabs a cached one. +func newPrinter(pp *Printer) *printer { + p := printerPool.Get().(*printer) + p.Printer = *pp + // TODO: cache most of the following call. + p.catContext = pp.cat.Context(pp.tag, p) + + p.panicking = false + p.erroring = false + p.fmt.init(&p.Buffer) + return p +} + +// free saves used printer structs in printerFree; avoids an allocation per invocation. +func (p *printer) free() { + p.Buffer.Reset() + p.arg = nil + p.value = reflect.Value{} + printerPool.Put(p) +} + +// printer is used to store a printer's state. +// It implements "golang.org/x/text/internal/format".State. +type printer struct { + Printer + + // the context for looking up message translations + catContext *catalog.Context + + // buffer for accumulating output. + bytes.Buffer + + // arg holds the current item, as an interface{}. + arg interface{} + // value is used instead of arg for reflect values. + value reflect.Value + + // fmt is used to format basic items such as integers or strings. 
+ fmt formatInfo + + // panicking is set by catchPanic to avoid infinite panic, recover, panic, ... recursion. + panicking bool + // erroring is set when printing an error string to guard against calling handleMethods. + erroring bool +} + +// Language implements "golang.org/x/text/internal/format".State. +func (p *printer) Language() language.Tag { return p.tag } + +func (p *printer) Width() (wid int, ok bool) { return p.fmt.Width, p.fmt.WidthPresent } + +func (p *printer) Precision() (prec int, ok bool) { return p.fmt.Prec, p.fmt.PrecPresent } + +func (p *printer) Flag(b int) bool { + switch b { + case '-': + return p.fmt.Minus + case '+': + return p.fmt.Plus || p.fmt.PlusV + case '#': + return p.fmt.Sharp || p.fmt.SharpV + case ' ': + return p.fmt.Space + case '0': + return p.fmt.Zero + } + return false +} + +// getField gets the i'th field of the struct value. +// If the field is itself is an interface, return a value for +// the thing inside the interface, not the interface itself. 
+func getField(v reflect.Value, i int) reflect.Value { + val := v.Field(i) + if val.Kind() == reflect.Interface && !val.IsNil() { + val = val.Elem() + } + return val +} + +func (p *printer) unknownType(v reflect.Value) { + if !v.IsValid() { + p.WriteString(nilAngleString) + return + } + p.WriteByte('?') + p.WriteString(v.Type().String()) + p.WriteByte('?') +} + +func (p *printer) badVerb(verb rune) { + p.erroring = true + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteByte('(') + switch { + case p.arg != nil: + p.WriteString(reflect.TypeOf(p.arg).String()) + p.WriteByte('=') + p.printArg(p.arg, 'v') + case p.value.IsValid(): + p.WriteString(p.value.Type().String()) + p.WriteByte('=') + p.printValue(p.value, 'v', 0) + default: + p.WriteString(nilAngleString) + } + p.WriteByte(')') + p.erroring = false +} + +func (p *printer) fmtBool(v bool, verb rune) { + switch verb { + case 't', 'v': + p.fmt.fmt_boolean(v) + default: + p.badVerb(verb) + } +} + +// fmt0x64 formats a uint64 in hexadecimal and prefixes it with 0x or +// not, as requested, by temporarily setting the sharp flag. +func (p *printer) fmt0x64(v uint64, leading0x bool) { + sharp := p.fmt.Sharp + p.fmt.Sharp = leading0x + p.fmt.fmt_integer(v, 16, unsigned, ldigits) + p.fmt.Sharp = sharp +} + +// fmtInteger formats a signed or unsigned integer. 
+func (p *printer) fmtInteger(v uint64, isSigned bool, verb rune) { + switch verb { + case 'v': + if p.fmt.SharpV && !isSigned { + p.fmt0x64(v, true) + return + } + fallthrough + case 'd': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_integer(v, 10, isSigned, ldigits) + } else { + p.fmtDecimalInt(v, isSigned) + } + case 'b': + p.fmt.fmt_integer(v, 2, isSigned, ldigits) + case 'o': + p.fmt.fmt_integer(v, 8, isSigned, ldigits) + case 'x': + p.fmt.fmt_integer(v, 16, isSigned, ldigits) + case 'X': + p.fmt.fmt_integer(v, 16, isSigned, udigits) + case 'c': + p.fmt.fmt_c(v) + case 'q': + if v <= utf8.MaxRune { + p.fmt.fmt_qc(v) + } else { + p.badVerb(verb) + } + case 'U': + p.fmt.fmt_unicode(v) + default: + p.badVerb(verb) + } +} + +// fmtFloat formats a float. The default precision for each verb +// is specified as last argument in the call to fmt_float. +func (p *printer) fmtFloat(v float64, size int, verb rune) { + switch verb { + case 'b': + p.fmt.fmt_float(v, size, verb, -1) + case 'v': + verb = 'g' + fallthrough + case 'g', 'G': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, -1) + } else { + p.fmtVariableFloat(v, size) + } + case 'e', 'E': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, 6) + } else { + p.fmtScientific(v, size, 6) + } + case 'f', 'F': + if p.fmt.Sharp || p.fmt.SharpV { + p.fmt.fmt_float(v, size, verb, 6) + } else { + p.fmtDecimalFloat(v, size, 6) + } + default: + p.badVerb(verb) + } +} + +func (p *printer) setFlags(f *number.Formatter) { + f.Flags &^= number.ElideSign + if p.fmt.Plus || p.fmt.Space { + f.Flags |= number.AlwaysSign + if !p.fmt.Plus { + f.Flags |= number.ElideSign + } + } else { + f.Flags &^= number.AlwaysSign + } +} + +func (p *printer) updatePadding(f *number.Formatter) { + f.Flags &^= number.PadMask + if p.fmt.Minus { + f.Flags |= number.PadAfterSuffix + } else { + f.Flags |= number.PadBeforePrefix + } + f.PadRune = ' ' + f.FormatWidth = uint16(p.fmt.Width) +} + +func (p *printer) 
initDecimal(minFrac, maxFrac int) { + f := &p.toDecimal + f.MinIntegerDigits = 1 + f.MaxIntegerDigits = 0 + f.MinFractionDigits = uint8(minFrac) + f.MaxFractionDigits = int16(maxFrac) + p.setFlags(f) + f.PadRune = 0 + if p.fmt.WidthPresent { + if p.fmt.Zero { + wid := p.fmt.Width + // Use significant integers for this. + // TODO: this is not the same as width, but so be it. + if f.MinFractionDigits > 0 { + wid -= 1 + int(f.MinFractionDigits) + } + if p.fmt.Plus || p.fmt.Space { + wid-- + } + if wid > 0 && wid > int(f.MinIntegerDigits) { + f.MinIntegerDigits = uint8(wid) + } + } + p.updatePadding(f) + } +} + +func (p *printer) initScientific(minFrac, maxFrac int) { + f := &p.toScientific + if maxFrac < 0 { + f.SetPrecision(maxFrac) + } else { + f.SetPrecision(maxFrac + 1) + f.MinFractionDigits = uint8(minFrac) + f.MaxFractionDigits = int16(maxFrac) + } + f.MinExponentDigits = 2 + p.setFlags(f) + f.PadRune = 0 + if p.fmt.WidthPresent { + f.Flags &^= number.PadMask + if p.fmt.Zero { + f.PadRune = f.Digit(0) + f.Flags |= number.PadAfterPrefix + } else { + f.PadRune = ' ' + f.Flags |= number.PadBeforePrefix + } + p.updatePadding(f) + } +} + +func (p *printer) fmtDecimalInt(v uint64, isSigned bool) { + var d number.Decimal + + f := &p.toDecimal + if p.fmt.PrecPresent { + p.setFlags(f) + f.MinIntegerDigits = uint8(p.fmt.Prec) + f.MaxIntegerDigits = 0 + f.MinFractionDigits = 0 + f.MaxFractionDigits = 0 + if p.fmt.WidthPresent { + p.updatePadding(f) + } + } else { + p.initDecimal(0, 0) + } + d.ConvertInt(p.toDecimal.RoundingContext, isSigned, v) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) +} + +func (p *printer) fmtDecimalFloat(v float64, size, prec int) { + var d number.Decimal + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + p.initDecimal(prec, prec) + d.ConvertFloat(p.toDecimal.RoundingContext, v, size) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) +} + +func (p *printer) fmtVariableFloat(v float64, size int) { + prec := 
-1 + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + var d number.Decimal + p.initScientific(0, prec) + d.ConvertFloat(p.toScientific.RoundingContext, v, size) + + // Copy logic of 'g' formatting from strconv. It is simplified a bit as + // we don't have to mind having prec > len(d.Digits). + shortest := prec < 0 + ePrec := prec + if shortest { + prec = len(d.Digits) + ePrec = 6 + } else if prec == 0 { + prec = 1 + ePrec = 1 + } + exp := int(d.Exp) - 1 + if exp < -4 || exp >= ePrec { + p.initScientific(0, prec) + + out := p.toScientific.Format([]byte(nil), &d) + p.Buffer.Write(out) + } else { + if prec > int(d.Exp) { + prec = len(d.Digits) + } + if prec -= int(d.Exp); prec < 0 { + prec = 0 + } + p.initDecimal(0, prec) + + out := p.toDecimal.Format([]byte(nil), &d) + p.Buffer.Write(out) + } +} + +func (p *printer) fmtScientific(v float64, size, prec int) { + var d number.Decimal + if p.fmt.PrecPresent { + prec = p.fmt.Prec + } + p.initScientific(prec, prec) + rc := p.toScientific.RoundingContext + d.ConvertFloat(rc, v, size) + + out := p.toScientific.Format([]byte(nil), &d) + p.Buffer.Write(out) + +} + +// fmtComplex formats a complex number v with +// r = real(v) and j = imag(v) as (r+ji) using +// fmtFloat for r and j formatting. +func (p *printer) fmtComplex(v complex128, size int, verb rune) { + // Make sure any unsupported verbs are found before the + // calls to fmtFloat to not generate an incorrect error string. + switch verb { + case 'v', 'b', 'g', 'G', 'f', 'F', 'e', 'E': + p.WriteByte('(') + p.fmtFloat(real(v), size/2, verb) + // Imaginary part always has a sign. + if math.IsNaN(imag(v)) { + // By CLDR's rules, NaNs do not use patterns or signs. As this code + // relies on AlwaysSign working for imaginary parts, we need to + // manually handle NaNs. 
+ f := &p.toScientific + p.setFlags(f) + p.updatePadding(f) + p.setFlags(f) + nan := f.Symbol(number.SymNan) + extra := 0 + if w, ok := p.Width(); ok { + extra = w - utf8.RuneCountInString(nan) - 1 + } + if f.Flags&number.PadAfterNumber == 0 { + for ; extra > 0; extra-- { + p.WriteRune(f.PadRune) + } + } + p.WriteString(f.Symbol(number.SymPlusSign)) + p.WriteString(nan) + for ; extra > 0; extra-- { + p.WriteRune(f.PadRune) + } + p.WriteString("i)") + return + } + oldPlus := p.fmt.Plus + p.fmt.Plus = true + p.fmtFloat(imag(v), size/2, verb) + p.WriteString("i)") // TODO: use symbol? + p.fmt.Plus = oldPlus + default: + p.badVerb(verb) + } +} + +func (p *printer) fmtString(v string, verb rune) { + switch verb { + case 'v': + if p.fmt.SharpV { + p.fmt.fmt_q(v) + } else { + p.fmt.fmt_s(v) + } + case 's': + p.fmt.fmt_s(v) + case 'x': + p.fmt.fmt_sx(v, ldigits) + case 'X': + p.fmt.fmt_sx(v, udigits) + case 'q': + p.fmt.fmt_q(v) + case 'm': + ctx := p.cat.Context(p.tag, rawPrinter{p}) + if ctx.Execute(v) == catalog.ErrNotFound { + p.WriteString(v) + } + default: + p.badVerb(verb) + } +} + +func (p *printer) fmtBytes(v []byte, verb rune, typeString string) { + switch verb { + case 'v', 'd': + if p.fmt.SharpV { + p.WriteString(typeString) + if v == nil { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + for i, c := range v { + if i > 0 { + p.WriteString(commaSpaceString) + } + p.fmt0x64(uint64(c), true) + } + p.WriteByte('}') + } else { + p.WriteByte('[') + for i, c := range v { + if i > 0 { + p.WriteByte(' ') + } + p.fmt.fmt_integer(uint64(c), 10, unsigned, ldigits) + } + p.WriteByte(']') + } + case 's': + p.fmt.fmt_s(string(v)) + case 'x': + p.fmt.fmt_bx(v, ldigits) + case 'X': + p.fmt.fmt_bx(v, udigits) + case 'q': + p.fmt.fmt_q(string(v)) + default: + p.printValue(reflect.ValueOf(v), verb, 0) + } +} + +func (p *printer) fmtPointer(value reflect.Value, verb rune) { + var u uintptr + switch value.Kind() { + case reflect.Chan, reflect.Func, reflect.Map, 
reflect.Ptr, reflect.Slice, reflect.UnsafePointer: + u = value.Pointer() + default: + p.badVerb(verb) + return + } + + switch verb { + case 'v': + if p.fmt.SharpV { + p.WriteByte('(') + p.WriteString(value.Type().String()) + p.WriteString(")(") + if u == 0 { + p.WriteString(nilString) + } else { + p.fmt0x64(uint64(u), true) + } + p.WriteByte(')') + } else { + if u == 0 { + p.fmt.padString(nilAngleString) + } else { + p.fmt0x64(uint64(u), !p.fmt.Sharp) + } + } + case 'p': + p.fmt0x64(uint64(u), !p.fmt.Sharp) + case 'b', 'o', 'd', 'x', 'X': + if verb == 'd' { + p.fmt.Sharp = true // Print as standard go. TODO: does this make sense? + } + p.fmtInteger(uint64(u), unsigned, verb) + default: + p.badVerb(verb) + } +} + +func (p *printer) catchPanic(arg interface{}, verb rune) { + if err := recover(); err != nil { + // If it's a nil pointer, just say "". The likeliest causes are a + // Stringer that fails to guard against nil or a nil pointer for a + // value receiver, and in either case, "" is a nice result. + if v := reflect.ValueOf(arg); v.Kind() == reflect.Ptr && v.IsNil() { + p.WriteString(nilAngleString) + return + } + // Otherwise print a concise panic message. Most of the time the panic + // value will print itself nicely. + if p.panicking { + // Nested panics; the recursion in printArg cannot succeed. + panic(err) + } + + oldFlags := p.fmt.Parser + // For this output we want default behavior. + p.fmt.ClearFlags() + + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(panicString) + p.panicking = true + p.printArg(err, 'v') + p.panicking = false + p.WriteByte(')') + + p.fmt.Parser = oldFlags + } +} + +func (p *printer) handleMethods(verb rune) (handled bool) { + if p.erroring { + return + } + // Is it a Formatter? 
+ if formatter, ok := p.arg.(format.Formatter); ok { + handled = true + defer p.catchPanic(p.arg, verb) + formatter.Format(p, verb) + return + } + if formatter, ok := p.arg.(fmt.Formatter); ok { + handled = true + defer p.catchPanic(p.arg, verb) + formatter.Format(p, verb) + return + } + + // If we're doing Go syntax and the argument knows how to supply it, take care of it now. + if p.fmt.SharpV { + if stringer, ok := p.arg.(fmt.GoStringer); ok { + handled = true + defer p.catchPanic(p.arg, verb) + // Print the result of GoString unadorned. + p.fmt.fmt_s(stringer.GoString()) + return + } + } else { + // If a string is acceptable according to the format, see if + // the value satisfies one of the string-valued interfaces. + // Println etc. set verb to %v, which is "stringable". + switch verb { + case 'v', 's', 'x', 'X', 'q': + // Is it an error or Stringer? + // The duplication in the bodies is necessary: + // setting handled and deferring catchPanic + // must happen before calling the method. + switch v := p.arg.(type) { + case error: + handled = true + defer p.catchPanic(p.arg, verb) + p.fmtString(v.Error(), verb) + return + + case fmt.Stringer: + handled = true + defer p.catchPanic(p.arg, verb) + p.fmtString(v.String(), verb) + return + } + } + } + return false +} + +func (p *printer) printArg(arg interface{}, verb rune) { + p.arg = arg + p.value = reflect.Value{} + + if arg == nil { + switch verb { + case 'T', 'v': + p.fmt.padString(nilAngleString) + default: + p.badVerb(verb) + } + return + } + + // Special processing considerations. + // %T (the value's type) and %p (its address) are special; we always do them first. + switch verb { + case 'T': + p.fmt.fmt_s(reflect.TypeOf(arg).String()) + return + case 'p': + p.fmtPointer(reflect.ValueOf(arg), 'p') + return + } + + // Some types can be done without reflection. 
+ switch f := arg.(type) { + case bool: + p.fmtBool(f, verb) + case float32: + p.fmtFloat(float64(f), 32, verb) + case float64: + p.fmtFloat(f, 64, verb) + case complex64: + p.fmtComplex(complex128(f), 64, verb) + case complex128: + p.fmtComplex(f, 128, verb) + case int: + p.fmtInteger(uint64(f), signed, verb) + case int8: + p.fmtInteger(uint64(f), signed, verb) + case int16: + p.fmtInteger(uint64(f), signed, verb) + case int32: + p.fmtInteger(uint64(f), signed, verb) + case int64: + p.fmtInteger(uint64(f), signed, verb) + case uint: + p.fmtInteger(uint64(f), unsigned, verb) + case uint8: + p.fmtInteger(uint64(f), unsigned, verb) + case uint16: + p.fmtInteger(uint64(f), unsigned, verb) + case uint32: + p.fmtInteger(uint64(f), unsigned, verb) + case uint64: + p.fmtInteger(f, unsigned, verb) + case uintptr: + p.fmtInteger(uint64(f), unsigned, verb) + case string: + p.fmtString(f, verb) + case []byte: + p.fmtBytes(f, verb, "[]byte") + case reflect.Value: + // Handle extractable values with special methods + // since printValue does not handle them at depth 0. + if f.IsValid() && f.CanInterface() { + p.arg = f.Interface() + if p.handleMethods(verb) { + return + } + } + p.printValue(f, verb, 0) + default: + // If the type is not simple, it might have methods. + if !p.handleMethods(verb) { + // Need to use reflection, since the type had no + // interface methods that could be used for formatting. + p.printValue(reflect.ValueOf(f), verb, 0) + } + } +} + +// printValue is similar to printArg but starts with a reflect value, not an interface{} value. +// It does not handle 'p' and 'T' verbs because these should have been already handled by printArg. +func (p *printer) printValue(value reflect.Value, verb rune, depth int) { + // Handle values with special methods if not already handled by printArg (depth == 0). 
+ if depth > 0 && value.IsValid() && value.CanInterface() { + p.arg = value.Interface() + if p.handleMethods(verb) { + return + } + } + p.arg = nil + p.value = value + + switch f := value; value.Kind() { + case reflect.Invalid: + if depth == 0 { + p.WriteString(invReflectString) + } else { + switch verb { + case 'v': + p.WriteString(nilAngleString) + default: + p.badVerb(verb) + } + } + case reflect.Bool: + p.fmtBool(f.Bool(), verb) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p.fmtInteger(uint64(f.Int()), signed, verb) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p.fmtInteger(f.Uint(), unsigned, verb) + case reflect.Float32: + p.fmtFloat(f.Float(), 32, verb) + case reflect.Float64: + p.fmtFloat(f.Float(), 64, verb) + case reflect.Complex64: + p.fmtComplex(f.Complex(), 64, verb) + case reflect.Complex128: + p.fmtComplex(f.Complex(), 128, verb) + case reflect.String: + p.fmtString(f.String(), verb) + case reflect.Map: + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + if f.IsNil() { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + } else { + p.WriteString(mapString) + } + keys := f.MapKeys() + for i, key := range keys { + if i > 0 { + if p.fmt.SharpV { + p.WriteString(commaSpaceString) + } else { + p.WriteByte(' ') + } + } + p.printValue(key, verb, depth+1) + p.WriteByte(':') + p.printValue(f.MapIndex(key), verb, depth+1) + } + if p.fmt.SharpV { + p.WriteByte('}') + } else { + p.WriteByte(']') + } + case reflect.Struct: + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + } + p.WriteByte('{') + for i := 0; i < f.NumField(); i++ { + if i > 0 { + if p.fmt.SharpV { + p.WriteString(commaSpaceString) + } else { + p.WriteByte(' ') + } + } + if p.fmt.PlusV || p.fmt.SharpV { + if name := f.Type().Field(i).Name; name != "" { + p.WriteString(name) + p.WriteByte(':') + } + } + p.printValue(getField(f, i), verb, depth+1) + } + p.WriteByte('}') + case 
reflect.Interface: + value := f.Elem() + if !value.IsValid() { + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + p.WriteString(nilParenString) + } else { + p.WriteString(nilAngleString) + } + } else { + p.printValue(value, verb, depth+1) + } + case reflect.Array, reflect.Slice: + switch verb { + case 's', 'q', 'x', 'X': + // Handle byte and uint8 slices and arrays special for the above verbs. + t := f.Type() + if t.Elem().Kind() == reflect.Uint8 { + var bytes []byte + if f.Kind() == reflect.Slice { + bytes = f.Bytes() + } else if f.CanAddr() { + bytes = f.Slice(0, f.Len()).Bytes() + } else { + // We have an array, but we cannot Slice() a non-addressable array, + // so we build a slice by hand. This is a rare case but it would be nice + // if reflection could help a little more. + bytes = make([]byte, f.Len()) + for i := range bytes { + bytes[i] = byte(f.Index(i).Uint()) + } + } + p.fmtBytes(bytes, verb, t.String()) + return + } + } + if p.fmt.SharpV { + p.WriteString(f.Type().String()) + if f.Kind() == reflect.Slice && f.IsNil() { + p.WriteString(nilParenString) + return + } + p.WriteByte('{') + for i := 0; i < f.Len(); i++ { + if i > 0 { + p.WriteString(commaSpaceString) + } + p.printValue(f.Index(i), verb, depth+1) + } + p.WriteByte('}') + } else { + p.WriteByte('[') + for i := 0; i < f.Len(); i++ { + if i > 0 { + p.WriteByte(' ') + } + p.printValue(f.Index(i), verb, depth+1) + } + p.WriteByte(']') + } + case reflect.Ptr: + // pointer to array or slice or struct? 
ok at top level + // but not embedded (avoid loops) + if depth == 0 && f.Pointer() != 0 { + switch a := f.Elem(); a.Kind() { + case reflect.Array, reflect.Slice, reflect.Struct, reflect.Map: + p.WriteByte('&') + p.printValue(a, verb, depth+1) + return + } + } + fallthrough + case reflect.Chan, reflect.Func, reflect.UnsafePointer: + p.fmtPointer(f, verb) + default: + p.unknownType(f) + } +} + +func (p *printer) badArgNum(verb rune) { + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(badIndexString) +} + +func (p *printer) missingArg(verb rune) { + p.WriteString(percentBangString) + p.WriteRune(verb) + p.WriteString(missingString) +} + +func (p *printer) doPrintf(fmt string) { + for p.fmt.Parser.SetFormat(fmt); p.fmt.Scan(); { + switch p.fmt.Status { + case format.StatusText: + p.WriteString(p.fmt.Text()) + case format.StatusSubstitution: + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusBadWidthSubstitution: + p.WriteString(badWidthString) + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusBadPrecSubstitution: + p.WriteString(badPrecString) + p.printArg(p.Arg(p.fmt.ArgNum), p.fmt.Verb) + case format.StatusNoVerb: + p.WriteString(noVerbString) + case format.StatusBadArgNum: + p.badArgNum(p.fmt.Verb) + case format.StatusMissingArg: + p.missingArg(p.fmt.Verb) + default: + panic("unreachable") + } + } + + // Check for extra arguments, but only if there was at least one ordered + // argument. Note that this behavior is necessarily different from fmt: + // different variants of messages may opt to drop some or all of the + // arguments. 
+ if !p.fmt.Reordered && p.fmt.ArgNum < len(p.fmt.Args) && p.fmt.ArgNum != 0 { + p.fmt.ClearFlags() + p.WriteString(extraString) + for i, arg := range p.fmt.Args[p.fmt.ArgNum:] { + if i > 0 { + p.WriteString(commaSpaceString) + } + if arg == nil { + p.WriteString(nilAngleString) + } else { + p.WriteString(reflect.TypeOf(arg).String()) + p.WriteString("=") + p.printArg(arg, 'v') + } + } + p.WriteByte(')') + } +} + +func (p *printer) doPrint(a []interface{}) { + prevString := false + for argNum, arg := range a { + isString := arg != nil && reflect.TypeOf(arg).Kind() == reflect.String + // Add a space between two non-string arguments. + if argNum > 0 && !isString && !prevString { + p.WriteByte(' ') + } + p.printArg(arg, 'v') + prevString = isString + } +} + +// doPrintln is like doPrint but always adds a space between arguments +// and a newline after the last argument. +func (p *printer) doPrintln(a []interface{}) { + for argNum, arg := range a { + if argNum > 0 { + p.WriteByte(' ') + } + p.printArg(arg, 'v') + } + p.WriteByte('\n') +} diff --git a/vendor/modules.txt b/vendor/modules.txt index 22536447..2afa3e92 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -110,6 +110,25 @@ github.com/cloudflare/circl/math/mlsbset github.com/cloudflare/circl/sign github.com/cloudflare/circl/sign/ed25519 github.com/cloudflare/circl/sign/ed448 +# github.com/compose-spec/compose-go/v2 v2.10.1 +## explicit; go 1.24 +github.com/compose-spec/compose-go/v2/cli +github.com/compose-spec/compose-go/v2/consts +github.com/compose-spec/compose-go/v2/dotenv +github.com/compose-spec/compose-go/v2/errdefs +github.com/compose-spec/compose-go/v2/format +github.com/compose-spec/compose-go/v2/graph +github.com/compose-spec/compose-go/v2/interpolation +github.com/compose-spec/compose-go/v2/loader +github.com/compose-spec/compose-go/v2/override +github.com/compose-spec/compose-go/v2/paths +github.com/compose-spec/compose-go/v2/schema +github.com/compose-spec/compose-go/v2/template 
+github.com/compose-spec/compose-go/v2/transform +github.com/compose-spec/compose-go/v2/tree +github.com/compose-spec/compose-go/v2/types +github.com/compose-spec/compose-go/v2/utils +github.com/compose-spec/compose-go/v2/validation # github.com/containerd/errdefs v1.0.0 ## explicit; go 1.20 github.com/containerd/errdefs @@ -427,6 +446,9 @@ github.com/mattn/go-localereader # github.com/mattn/go-runewidth v0.0.21 ## explicit; go 1.20 github.com/mattn/go-runewidth +# github.com/mattn/go-shellwords v1.0.12 +## explicit; go 1.13 +github.com/mattn/go-shellwords # github.com/mgutz/ansi v0.0.0-20200706080929-d51e80ef957d ## explicit github.com/mgutz/ansi @@ -529,6 +551,10 @@ github.com/rivo/uniseg # github.com/russross/blackfriday/v2 v2.1.0 ## explicit github.com/russross/blackfriday/v2 +# github.com/santhosh-tekuri/jsonschema/v6 v6.0.1 +## explicit; go 1.21 +github.com/santhosh-tekuri/jsonschema/v6 +github.com/santhosh-tekuri/jsonschema/v6/kind # github.com/schollz/progressbar/v3 v3.19.0 ## explicit; go 1.22 github.com/schollz/progressbar/v3 @@ -566,6 +592,9 @@ github.com/xeipuuv/gojsonreference # github.com/xeipuuv/gojsonschema v1.2.0 ## explicit github.com/xeipuuv/gojsonschema +# github.com/xhit/go-str2duration/v2 v2.1.0 +## explicit; go 1.13 +github.com/xhit/go-str2duration/v2 # github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e ## explicit; go 1.19 github.com/xo/terminfo @@ -662,6 +691,10 @@ go.yaml.in/yaml/v2 # go.yaml.in/yaml/v3 v3.0.4 ## explicit; go 1.16 go.yaml.in/yaml/v3 +# go.yaml.in/yaml/v4 v4.0.0-rc.3 +## explicit; go 1.18 +go.yaml.in/yaml/v4 +go.yaml.in/yaml/v4/internal/libyaml # golang.org/x/crypto v0.49.0 ## explicit; go 1.25.0 golang.org/x/crypto/argon2 @@ -699,6 +732,9 @@ golang.org/x/net/internal/socks golang.org/x/net/internal/timeseries golang.org/x/net/proxy golang.org/x/net/trace +# golang.org/x/sync v0.20.0 +## explicit; go 1.25.0 +golang.org/x/sync/errgroup # golang.org/x/sys v0.42.0 ## explicit; go 1.25.0 golang.org/x/sys/cpu @@ -713,11 
+749,18 @@ golang.org/x/term # golang.org/x/text v0.35.0 ## explicit; go 1.25.0 golang.org/x/text/cases +golang.org/x/text/feature/plural golang.org/x/text/internal +golang.org/x/text/internal/catmsg +golang.org/x/text/internal/format golang.org/x/text/internal/language golang.org/x/text/internal/language/compact +golang.org/x/text/internal/number +golang.org/x/text/internal/stringset golang.org/x/text/internal/tag golang.org/x/text/language +golang.org/x/text/message +golang.org/x/text/message/catalog golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi