Compare commits: 0.10.0-rc1...integratio
27 Commits
SHA1
0a371ec360
e58a716fe1
d09a19a385
cee808ff06
4326d1d259
b976872f77
7b6ea76437
9069758969
15d6b1a2a5
8a7fe4ca07
64ad60663f
cb3f46b46e
41e514ae9a
086b4828ff
ed263854d4
eb6fe4ba6e
993172d31b
c70b6e72a7
22e4dd7fca
b6009057a8
b978f04910
3ac29d54d9
877c17fab5
f01fd26ce3
273c165a41
c88fc66c99
9b271a6963
@@ -4,6 +4,7 @@
> please do add yourself! This is a community project, let's show some 💞

- 3wordchant
- ammaratef45
- cassowary
- codegod100
- decentral1se
@@ -17,3 +18,5 @@
- roxxers
- vera
- yksflip
- basebuilder
- mayel
@@ -261,7 +261,7 @@ func init() {
AppCmdCommand.Flags().BoolVarP(
&requestTTY,
"tty",
"t",
"T",
false,
"request remote TTY",
)
@@ -3,6 +3,7 @@ package app
import (
"context"
"fmt"
"strings"

"coopcloud.tech/abra/cli/internal"
"coopcloud.tech/abra/pkg/app"
@@ -46,7 +47,8 @@ checkout as-is. Recipe commit hashes are also supported as values for
ValidArgsFunction: func(
cmd *cobra.Command,
args []string,
toComplete string) ([]string, cobra.ShellCompDirective) {
toComplete string,
) ([]string, cobra.ShellCompDirective) {
switch l := len(args); l {
case 0:
return autocomplete.AppNameComplete()
@@ -63,10 +65,8 @@ checkout as-is. Recipe commit hashes are also supported as values for
},
Run: func(cmd *cobra.Command, args []string) {
var (
deployWarnMessages []string
toDeployVersion string
isChaosCommit bool
toDeployChaosVersion = config.CHAOS_DEFAULT
deployWarnMessages []string
toDeployVersion string
)

app := internal.ValidateApp(args)
@@ -79,10 +79,6 @@ checkout as-is. Recipe commit hashes are also supported as values for
log.Fatal(err)
}

if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
}

cl, err := client.New(app.Server)
if err != nil {
log.Fatal(err)
@@ -99,46 +95,20 @@ checkout as-is. Recipe commit hashes are also supported as values for
log.Fatalf("%s is already deployed", app.Name)
}

if len(args) == 2 && args[1] != "" {
toDeployVersion = args[1]
}

if !deployMeta.IsDeployed &&
toDeployVersion == "" &&
app.Recipe.EnvVersion != "" && !internal.IgnoreEnvVersion {
log.Debugf("new deployment, choosing .env version: %s", app.Recipe.EnvVersion)
toDeployVersion = app.Recipe.EnvVersion
}

if !internal.Chaos && toDeployVersion == "" {
if err := getLatestVersionOrCommit(app, &toDeployVersion); err != nil {
log.Fatal(err)
}
}

if internal.Chaos {
if err := getChaosVersion(app, &toDeployVersion, &toDeployChaosVersion); err != nil {
log.Fatal(err)
}
toDeployVersion, err = getDeployVersion(args, deployMeta, app)
if err != nil {
log.Fatal(fmt.Errorf("get deploy version: %s", err))
}

if !internal.Chaos {
isChaosCommit, err = app.Recipe.EnsureVersion(toDeployVersion)
_, err = app.Recipe.EnsureVersion(toDeployVersion)
if err != nil {
log.Fatal(err)
log.Fatalf("ensure recipe: %s", err)
}
}

if isChaosCommit {
log.Debugf("assuming chaos commit: %s", toDeployVersion)

internal.Chaos = true
toDeployChaosVersion = toDeployVersion

toDeployVersion, err = app.Recipe.GetVersionLabelLocal()
if err != nil {
log.Fatal(err)
}
}
if err := lint.LintForErrors(app.Recipe); err != nil {
log.Fatal(err)
}

if err := validateSecrets(cl, app); err != nil {
@@ -171,16 +141,14 @@ checkout as-is. Recipe commit hashes are also supported as values for
log.Fatal(err)
}

toDeployChaosVersionLabel := toDeployChaosVersion
if app.Recipe.Dirty {
toDeployChaosVersionLabel = formatter.AddDirtyMarker(toDeployChaosVersionLabel)
}

appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, toDeployChaosVersionLabel)
if internal.Chaos {
appPkg.SetChaosVersionLabel(compose, stackName, toDeployVersion)
}
appPkg.SetUpdateLabel(compose, stackName, app.Env)
appPkg.SetVersionLabel(compose, stackName, toDeployVersion)

envVars, err := appPkg.CheckEnv(app)
if err != nil {
@@ -212,19 +180,12 @@ checkout as-is. Recipe commit hashes are also supported as values for
deployedVersion = deployMeta.Version
}

toWriteVersion := toDeployVersion
if internal.Chaos || isChaosCommit {
toWriteVersion = toDeployChaosVersion
}

if err := internal.DeployOverview(
app,
deployWarnMessages,
deployedVersion,
deployMeta.ChaosVersion,
toDeployVersion,
toDeployChaosVersion,
toWriteVersion,
"",
deployWarnMessages,
); err != nil {
log.Fatal(err)
}
@@ -248,53 +209,28 @@ checkout as-is. Recipe commit hashes are also supported as values for
}
}

if err := app.WriteRecipeVersion(toWriteVersion, false); err != nil {
if err := app.WriteRecipeVersion(toDeployVersion, false); err != nil {
log.Fatalf("writing recipe version failed: %s", err)
}
},
}

func getChaosVersion(app app.App, toDeployVersion, toDeployChaosVersion *string) error {
var err error
*toDeployChaosVersion, err = app.Recipe.ChaosVersion()
if err != nil {
return err
}

*toDeployVersion, err = app.Recipe.GetVersionLabelLocal()
if err != nil {
return err
}

return nil
}

func getLatestVersionOrCommit(app app.App, toDeployVersion *string) error {
func getLatestVersionOrCommit(app app.App) (string, error) {
versions, err := app.Recipe.Tags()
if err != nil {
return err
return "", err
}

if len(versions) > 0 && !internal.Chaos {
*toDeployVersion = versions[len(versions)-1]

log.Debugf("choosing %s as version to deploy", *toDeployVersion)

if _, err := app.Recipe.EnsureVersion(*toDeployVersion); err != nil {
return err
}

return nil
return versions[len(versions)-1], nil
}

head, err := app.Recipe.Head()
if err != nil {
return err
return "", err
}

*toDeployVersion = formatter.SmallSHA(head.String())

return nil
return formatter.SmallSHA(head.String()), nil
}

// validateArgsAndFlags ensures compatible args/flags.
@@ -321,6 +257,46 @@ func validateSecrets(cl *dockerClient.Client, app app.App) error {
return nil
}

func getDeployVersion(cliArgs []string, deployMeta stack.DeployMeta, app app.App) (string, error) {
// Chaos mode overrides everything
if internal.Chaos {
v, err := app.Recipe.ChaosVersion()
if err != nil {
return "", err
}
log.Debugf("version: taking chaos version: %s", v)
return v, nil
}

// Check if the deploy version is set with a cli argument
if len(cliArgs) == 2 && cliArgs[1] != "" {
log.Debugf("version: taking version from cli arg: %s", cliArgs[1])
return cliArgs[1], nil
}

// Check if the recipe has a version in the .env file
if app.Recipe.EnvVersion != "" && !internal.IgnoreEnvVersion {
if strings.HasSuffix(app.Recipe.EnvVersionRaw, "+U") {
return "", fmt.Errorf("version: can not redeploy chaos version %s", app.Recipe.EnvVersionRaw)
}
log.Debugf("version: taking version from .env file: %s", app.Recipe.EnvVersion)
return app.Recipe.EnvVersion, nil
}

// Take deployed version
if deployMeta.IsDeployed {
log.Debugf("version: taking deployed version: %s", deployMeta.Version)
return deployMeta.Version, nil
}

v, err := getLatestVersionOrCommit(app)
log.Debugf("version: taking new recipe version: %s", v)
if err != nil {
return "", err
}
return v, nil
}

func init() {
AppDeployCommand.Flags().BoolVarP(
&internal.Chaos,
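Note: the new getDeployVersion collapses the previously scattered branches into a single precedence chain — chaos flag, then CLI argument, then the .env pin, then the already-deployed version, then the latest tag or commit. A minimal standalone sketch of that ordering, using simplified stand-in types rather than abra's real packages (all names here are illustrative assumptions):

```go
package main

import (
	"errors"
	"fmt"
)

// deployMeta is a simplified stand-in for stack.DeployMeta.
type deployMeta struct {
	IsDeployed bool
	Version    string
}

// resolveVersion mirrors the precedence of getDeployVersion above.
func resolveVersion(chaos bool, chaosVersion string, cliArgs []string, envVersion string, meta deployMeta, latest string) (string, error) {
	if chaos {
		return chaosVersion, nil // chaos mode overrides everything
	}
	if len(cliArgs) == 2 && cliArgs[1] != "" {
		return cliArgs[1], nil // explicit CLI argument wins next
	}
	if envVersion != "" {
		return envVersion, nil // then the version pinned in the .env file
	}
	if meta.IsDeployed {
		return meta.Version, nil // then whatever is already running
	}
	if latest == "" {
		return "", errors.New("no version could be resolved")
	}
	return latest, nil // finally, fall back to the latest tag or commit
}

func main() {
	v, _ := resolveVersion(false, "", []string{"app.example.com"}, "2.1.0", deployMeta{}, "2.2.0")
	fmt.Println(v) // 2.1.0: the .env pin wins over the latest tag
}
```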
@@ -75,43 +75,41 @@ var AppNewCommand = &cobra.Command{

chaosVersion := config.CHAOS_DEFAULT
if internal.Chaos {
recipeVersion = chaosVersion

if !internal.Offline {
if err := recipe.EnsureUpToDate(); err != nil {
log.Fatal(err)
}
}
}

if !internal.Chaos {
if err := recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}
}

var recipeVersions recipePkg.RecipeVersions
if recipeVersion == "" {
var err error
recipeVersions, _, err = recipe.GetRecipeVersions()
chaosVersion, err = recipe.ChaosVersion()
if err != nil {
log.Fatal(err)
}
}

if len(recipeVersions) > 0 {
latest := recipeVersions[len(recipeVersions)-1]
for tag := range latest {
recipeVersion = tag
}

if _, err := recipe.EnsureVersion(recipeVersion); err != nil {
log.Fatal(err)
}
recipeVersion = chaosVersion
} else {
if err := recipe.EnsureLatest(); err != nil {
if err := recipe.EnsureIsClean(); err != nil {
log.Fatal(err)
}

var recipeVersions recipePkg.RecipeVersions
if recipeVersion == "" {
var err error
recipeVersions, _, err = recipe.GetRecipeVersions()
if err != nil {
log.Fatal(err)
}
}

if len(recipeVersions) > 0 {
latest := recipeVersions[len(recipeVersions)-1]
for tag := range latest {
recipeVersion = tag
}

if _, err := recipe.EnsureVersion(recipeVersion); err != nil {
log.Fatal(err)
}
} else {
if err := recipe.EnsureLatest(); err != nil {
log.Fatal(err)
}
}
}

if err := ensureServerFlag(); err != nil {
@@ -123,6 +123,13 @@ Pass "--all-services/-a" to restart all services.`,
var allServices bool

func init() {
AppRestartCommand.Flags().BoolVarP(
&internal.Chaos,
"chaos",
"C",
false,
"ignore uncommitted recipes changes",
)
AppRestartCommand.Flags().BoolVarP(
&allServices,
"all-services",
@@ -178,23 +178,18 @@ beforehand. See "abra app backup" for more.`,
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chosenDowngrade)
if internal.Chaos {
appPkg.SetChaosVersionLabel(compose, stackName, chosenDowngrade)
}
appPkg.SetUpdateLabel(compose, stackName, app.Env)

chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
}

// NOTE(d1): no release notes implemeneted for rolling back
if err := internal.NewVersionOverview(
if err := internal.DeployOverview(
app,
downgradeWarnMessages,
"rollback",
deployMeta.Version,
chaosVersion,
chosenDowngrade,
"",
downgradeWarnMessages,
); err != nil {
log.Fatal(err)
}
@@ -247,7 +242,7 @@ func validateDowngradeVersionArg(
) error {
parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
return fmt.Errorf("'%s' is not a known version for %s", deployMeta.Version, app.Recipe.Name)
return fmt.Errorf("current deployment '%s' is not a known version for %s", deployMeta.Version, app.Recipe.Name)
}

parsedSpecificVersion, err := tagcmp.Parse(specificVersion)
@@ -49,11 +49,11 @@ var AppSecretGenerateCommand = &cobra.Command{
log.Fatal(err)
}

if len(args) == 1 && !generateAllSecrets {
if len(args) <= 2 && !generateAllSecrets {
log.Fatal("missing arguments [secret]/[version] or '--all'")
}

if len(args) > 1 && generateAllSecrets {
if len(args) > 2 && generateAllSecrets {
log.Fatal("cannot use '[secret] [version]' and '--all' together")
}
@@ -54,21 +54,12 @@ Passing "--prune/-p" does not remove those volumes.`,
log.Fatalf("%s is not deployed?", app.Name)
}

chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion
}

toWriteVersion := deployMeta.Version
if deployMeta.IsChaos {
toWriteVersion = chaosVersion
}

if err := internal.UndeployOverview(
if err := internal.DeployOverview(
app,
deployMeta.Version,
chaosVersion,
toWriteVersion,
config.NO_DOMAIN_DEFAULT,
"",
nil,
); err != nil {
log.Fatal(err)
}
@@ -87,7 +78,7 @@ Passing "--prune/-p" does not remove those volumes.`,
}
}

if err := app.WriteRecipeVersion(toWriteVersion, false); err != nil {
if err := app.WriteRecipeVersion(deployMeta.Version, false); err != nil {
log.Fatalf("writing recipe version failed: %s", err)
}
},
@@ -43,7 +43,8 @@ beforehand. See "abra app backup" for more.`,
ValidArgsFunction: func(
cmd *cobra.Command,
args []string,
toComplete string) ([]string, cobra.ShellCompDirective) {
toComplete string,
) ([]string, cobra.ShellCompDirective) {
switch l := len(args); l {
case 0:
return autocomplete.AppNameComplete()
@@ -183,7 +184,9 @@ beforehand. See "abra app backup" for more.`,
appPkg.ExposeAllEnv(stackName, compose, app.Env)
appPkg.SetRecipeLabel(compose, stackName, app.Recipe.Name)
appPkg.SetChaosLabel(compose, stackName, internal.Chaos)
appPkg.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
if internal.Chaos {
appPkg.SetChaosVersionLabel(compose, stackName, chosenUpgrade)
}
appPkg.SetUpdateLabel(compose, stackName, app.Env)

envVars, err := appPkg.CheckEnv(app)
@@ -204,23 +207,21 @@ beforehand. See "abra app backup" for more.`,
return
}

chaosVersion := config.CHAOS_DEFAULT
if deployMeta.IsChaos {
chaosVersion = deployMeta.ChaosVersion

if deployMeta.ChaosVersion == "" {
chaosVersion = config.UNKNOWN_DEFAULT
}
if upgradeReleaseNotes != "" && chosenUpgrade != "" {
fmt.Print(upgradeReleaseNotes)
} else {
upgradeWarnMessages = append(
upgradeWarnMessages,
fmt.Sprintf("no release notes available for %s", chosenUpgrade),
)
}

if err := internal.NewVersionOverview(
if err := internal.DeployOverview(
app,
upgradeWarnMessages,
"upgrade",
deployMeta.Version,
chaosVersion,
chosenUpgrade,
upgradeReleaseNotes,
upgradeWarnMessages,
); err != nil {
log.Fatal(err)
}
@@ -363,7 +364,7 @@ func validateUpgradeVersionArg(

parsedDeployedVersion, err := tagcmp.Parse(deployMeta.Version)
if err != nil {
return err
return fmt.Errorf("'%s' is not a known version", deployMeta.Version)
}

if parsedSpecificVersion.IsLessThan(parsedDeployedVersion) &&
@@ -395,9 +396,7 @@ func ensureDeployed(cl *dockerClient.Client, app app.App) (stack.DeployMeta, err
return deployMeta, nil
}

var (
showReleaseNotes bool
)
var showReleaseNotes bool

func init() {
AppUpgradeCommand.Flags().BoolVarP(
@@ -25,6 +25,11 @@ var CatalogueGenerateCommand = &cobra.Command{
Short: "Generate the recipe catalogue",
Long: `Generate a new copy of the recipe catalogue.

N.B. this command **will** wipe local unstaged changes from your local recipes
if present. "--chaos/-C" on this command refers to the catalogue repository
("$ABRA_DIR/catalogue") and not the recipes. Please take care not to lose your
changes.

It is possible to generate new metadata for a single recipe by passing
[recipe]. The existing local catalogue will be updated, not overwritten.
@@ -12,17 +12,16 @@ var AutocompleteCommand = &cobra.Command{
Long: `To load completions:

Bash:

# Load autocompletion for the current Bash session
$ source <(abra autocomplete bash)

# To load autocompletion for each session, execute once:
# Linux:
$ abra autocomplete bash > /etc/bash_completion.d/abra
$ abra autocomplete bash | sudo tee /etc/bash_completion.d/abra
# macOS:
$ abra autocomplete bash > $(brew --prefix)/etc/bash_completion.d/abra
$ abra autocomplete bash | sudo tee $(brew --prefix)/etc/bash_completion.d/abra

Zsh:

# If shell autocompletion is not already enabled in your environment,
# you will need to enable it. You can execute the following once:

@@ -34,14 +33,12 @@ Zsh:
# You will need to start a new shell for this setup to take effect.

fish:

$ abra autocomplete fish | source

# To load autocompletions for each session, execute once:
$ abra autocomplete fish > ~/.config/fish/completions/abra.fish

PowerShell:

PS> abra autocomplete powershell | Out-String | Invoke-Expression

# To load autocompletions for every new session, run:
@@ -10,7 +10,6 @@ import (
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/recipe"
"coopcloud.tech/tagcmp"
"github.com/AlecAivazis/survey/v2"
"github.com/charmbracelet/lipgloss"
@@ -38,100 +37,21 @@ func horizontal(left, mid, right string) string {
return lipgloss.JoinHorizontal(lipgloss.Left, left, mid, right)
}

// NewVersionOverview shows an upgrade or downgrade overview
func NewVersionOverview(
app appPkg.App,
warnMessages []string,
kind,
deployedVersion,
deployedChaosVersion,
toDeployVersion,
releaseNotes string) error {
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = composeFiles
}

server := app.Server
if app.Server == "default" {
server = "local"
}

domain := app.Domain
if domain == "" {
domain = config.NO_DOMAIN_DEFAULT
}

upperKind := strings.ToUpper(kind)

rows := [][]string{
{"DOMAIN", domain},
{"RECIPE", app.Recipe.Name},
{"SERVER", server},
{"CONFIG", deployConfig},

{"CURRENT DEPLOYMENT", "---"},
{"VERSION", formatter.BoldDirtyDefault(deployedVersion)},
{"CHAOS ", formatter.BoldDirtyDefault(deployedChaosVersion)},

{upperKind, "---"},
{"VERSION", formatter.BoldDirtyDefault(toDeployVersion)},

{fmt.Sprintf("%s.ENV", strings.ToUpper(app.Domain)), "---"},
{"CURRENT VERSION", formatter.BoldDirtyDefault(app.Recipe.EnvVersion)},
{"NEW VERSION", formatter.BoldDirtyDefault(toDeployVersion)},
}

overview := formatter.CreateOverview(
fmt.Sprintf("%s OVERVIEW", upperKind),
rows,
)

fmt.Println(overview)

if releaseNotes != "" && toDeployVersion != "" {
fmt.Print(releaseNotes)
} else {
warnMessages = append(
warnMessages,
fmt.Sprintf("no release notes available for %s", toDeployVersion),
)
}

for _, msg := range warnMessages {
log.Warn(msg)
}

if NoInput {
return nil
}

response := false
prompt := &survey.Confirm{Message: "proceed?"}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}

if !response {
log.Fatal("deployment cancelled")
}

return nil
func formatComposeFiles(composeFiles string) string {
return strings.ReplaceAll(composeFiles, ":", "\n")
}

// DeployOverview shows a deployment overview
func DeployOverview(
app appPkg.App,
warnMessages []string,
deployedVersion string,
deployedChaosVersion string,
toDeployVersion,
toDeployChaosVersion string,
toWriteVersion string,
toDeployVersion string,
info string,
warnMessages []string,
) error {
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = composeFiles
deployConfig = formatComposeFiles(composeFiles)
}

server := app.Server
@@ -144,21 +64,7 @@ func DeployOverview(
domain = config.NO_DOMAIN_DEFAULT
}

if app.Recipe.Dirty {
toWriteVersion = formatter.AddDirtyMarker(toWriteVersion)
toDeployChaosVersion = formatter.AddDirtyMarker(toDeployChaosVersion)
}

recipeName, exists := app.Env["RECIPE"]
if !exists {
recipeName = app.Env["TYPE"]
}

envVersion, err := recipe.GetEnvVersionRaw(recipeName)
if err != nil {
return err
}

envVersion := app.Recipe.EnvVersionRaw
if envVersion == "" {
envVersion = config.NO_VERSION_DEFAULT
}
@@ -168,24 +74,21 @@ func DeployOverview(
{"RECIPE", app.Recipe.Name},
{"SERVER", server},
{"CONFIG", deployConfig},

{"CURRENT DEPLOYMENT", "---"},
{"VERSION", formatter.BoldDirtyDefault(deployedVersion)},
{"CHAOS", formatter.BoldDirtyDefault(deployedChaosVersion)},

{"NEW DEPLOYMENT", "---"},
{"VERSION", formatter.BoldDirtyDefault(toDeployVersion)},
{"CHAOS", formatter.BoldDirtyDefault(toDeployChaosVersion)},

{fmt.Sprintf("%s.ENV", strings.ToUpper(app.Name)), "---"},
{"CURRENT VERSION", formatter.BoldDirtyDefault(envVersion)},
{"NEW VERSION", formatter.BoldDirtyDefault(toWriteVersion)},
{"", ""},
{"CURRENT DEPLOYMENT", formatter.BoldDirtyDefault(deployedVersion)},
{"ENV VERSION", formatter.BoldDirtyDefault(envVersion)},
{"NEW DEPLOYMENT", formatter.BoldDirtyDefault(toDeployVersion)},
}

overview := formatter.CreateOverview("DEPLOY OVERVIEW", rows)
deployType := getDeployType(deployedVersion, toDeployVersion)
overview := formatter.CreateOverview(fmt.Sprintf("%s OVERVIEW", deployType), rows)

fmt.Println(overview)

if info != "" {
fmt.Println(info)
}

for _, msg := range warnMessages {
log.Warn(msg)
}
@@ -207,76 +110,34 @@ func DeployOverview(
return nil
}

// UndeployOverview shows an undeployment overview
func UndeployOverview(
app appPkg.App,
deployedVersion,
deployedChaosVersion,
toWriteVersion string,
) error {
deployConfig := "compose.yml"
if composeFiles, ok := app.Env["COMPOSE_FILE"]; ok {
deployConfig = composeFiles
func getDeployType(currentVersion, newVersion string) string {
if newVersion == config.NO_DOMAIN_DEFAULT {
return "UNDEPLOY"
}

server := app.Server
if app.Server == "default" {
server = "local"
if strings.Contains(newVersion, "+U") {
return "CHAOS DEPLOY"
}

domain := app.Domain
if domain == "" {
domain = config.NO_DOMAIN_DEFAULT
if strings.Contains(currentVersion, "+U") {
return "UNCHAOS DEPLOY"
}

recipeName, exists := app.Env["RECIPE"]
if !exists {
recipeName = app.Env["TYPE"]
if currentVersion == newVersion {
return "REDEPLOY"
}

envVersion, err := recipe.GetEnvVersionRaw(recipeName)
if currentVersion == config.NO_VERSION_DEFAULT {
return "NEW DEPLOY"
}
currentParsed, err := tagcmp.Parse(currentVersion)
if err != nil {
return err
return "DEPLOY"
}

if envVersion == "" {
envVersion = config.NO_VERSION_DEFAULT
newParsed, err := tagcmp.Parse(newVersion)
if err != nil {
return "DEPLOY"
}

rows := [][]string{
{"DOMAIN", domain},
{"RECIPE", app.Recipe.Name},
{"SERVER", server},
{"CONFIG", deployConfig},

{"CURRENT DEPLOYMENT", "---"},
{"VERSION", formatter.BoldDirtyDefault(deployedVersion)},
{"CHAOS", formatter.BoldDirtyDefault(deployedChaosVersion)},

{fmt.Sprintf("%s.ENV", strings.ToUpper(app.Name)), "---"},
{"CURRENT VERSION", formatter.BoldDirtyDefault(envVersion)},
{"NEW VERSION", formatter.BoldDirtyDefault(toWriteVersion)},
if currentParsed.IsLessThan(newParsed) {
return "UPGRADE"
}

overview := formatter.CreateOverview("UNDEPLOY OVERVIEW", rows)

fmt.Println(overview)

if NoInput {
return nil
}

response := false
prompt := &survey.Confirm{Message: "proceed?"}
if err := survey.AskOne(prompt, &response); err != nil {
return err
}

if !response {
log.Fatal("undeploy cancelled")
}

return nil
return "DOWNGRADE"
}

// PostCmds parses a string of commands and executes them inside of the respective services
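Note: getDeployType derives the overview heading from the current and new version strings alone. A sketch of the expected mapping as a table-driven test, assuming it sits in the same package as getDeployType (the version strings are made up for illustration):

```go
func TestGetDeployTypeExamples(t *testing.T) {
	cases := []struct {
		current, new, want string
	}{
		{"1.0.0+U", "1.0.0", "UNCHAOS DEPLOY"},  // leaving chaos mode
		{"1.0.0", "abcdef1+U", "CHAOS DEPLOY"},  // deploying unstaged changes
		{"1.0.0", "1.0.0", "REDEPLOY"},
		{"1.0.0", "1.1.0", "UPGRADE"},
		{"1.1.0", "1.0.0", "DOWNGRADE"},
	}
	for _, c := range cases {
		if got := getDeployType(c.current, c.new); got != c.want {
			t.Errorf("getDeployType(%q, %q) = %q, want %q", c.current, c.new, got, c.want)
		}
	}
}
```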
@@ -267,6 +267,8 @@ func addReleaseNotes(recipe recipe.Recipe, tag string) error {
return err
}

var addNextAsReleaseNotes bool

nextReleaseNotePath := path.Join(releaseDir, "next")
if _, err := os.Stat(nextReleaseNotePath); err == nil {
// release/next note exists. Move it to release/<tag>
@@ -276,38 +278,37 @@ func addReleaseNotes(recipe recipe.Recipe, tag string) error {
}

if !internal.NoInput {
prompt := &survey.Input{
prompt := &survey.Confirm{
Message: "Use release note in release/next?",
}
var addReleaseNote bool
if err := survey.AskOne(prompt, &addReleaseNote); err != nil {

if err := survey.AskOne(prompt, &addNextAsReleaseNotes); err != nil {
return err
}
if !addReleaseNote {

if !addNextAsReleaseNotes {
return nil
}
}

err := os.Rename(nextReleaseNotePath, tagReleaseNotePath)
if err != nil {
if err := os.Rename(nextReleaseNotePath, tagReleaseNotePath); err != nil {
return err
}

err = gitPkg.Add(recipe.Dir, path.Join("release", "next"), internal.Dry)
if err != nil {
if err := gitPkg.Add(recipe.Dir, path.Join("release", "next"), internal.Dry); err != nil {
return err
}

err = gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry)
if err != nil {
if err := gitPkg.Add(recipe.Dir, path.Join("release", tag), internal.Dry); err != nil {
return err
}
} else if !errors.Is(err, os.ErrNotExist) {
return err
}

// No release note exists for the current release.
if internal.NoInput {
// NOTE(d1): No release note exists for the current release. Or, we've
// already used release/next as the release note
if internal.NoInput || addNextAsReleaseNotes {
return nil
}
@@ -51,6 +51,10 @@ func Run(version, commit string) {
log.Logger.SetStyles(charmLog.DefaultStyles())
charmLog.SetDefault(log.Logger)

if internal.MachineReadable {
log.SetOutput(os.Stderr)
}

if internal.Debug {
log.SetLevel(log.DebugLevel)
log.SetOutput(os.Stderr)
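Note: routing logs to stderr when machine-readable output is requested keeps stdout clean for whatever a script wants to parse. A minimal sketch of the same pattern with the standard library logger instead of abra's charmbracelet logger (illustration only):

```go
package main

import (
	"fmt"
	"log"
	"os"
)

func main() {
	log.SetOutput(os.Stderr)       // diagnostics go to stderr
	fmt.Println(`{"status":"ok"}`) // machine-readable result stays on stdout
	log.Println("this warning will not corrupt the JSON stream")
}
```

With this split, a pipeline such as piping stdout into a JSON parser keeps working even when warnings are emitted.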
go.mod | 24
@@ -17,10 +17,10 @@ require (
github.com/go-git/go-git/v5 v5.13.1
github.com/google/go-cmp v0.6.0
github.com/moby/sys/signal v0.7.1
github.com/moby/term v0.5.0
github.com/moby/term v0.5.2
github.com/pkg/errors v0.9.1
github.com/schollz/progressbar/v3 v3.17.1
golang.org/x/term v0.27.0
golang.org/x/term v0.28.0
gopkg.in/yaml.v3 v3.0.1
gotest.tools/v3 v3.5.1
)
@@ -111,19 +111,19 @@ require (
go.opentelemetry.io/otel/sdk v1.33.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.33.0 // indirect
go.opentelemetry.io/otel/trace v1.33.0 // indirect
go.opentelemetry.io/proto/otlp v1.4.0 // indirect
golang.org/x/crypto v0.31.0 // indirect
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 // indirect
go.opentelemetry.io/proto/otlp v1.5.0 // indirect
golang.org/x/crypto v0.32.0 // indirect
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 // indirect
golang.org/x/mod v0.22.0 // indirect
golang.org/x/net v0.33.0 // indirect
golang.org/x/net v0.34.0 // indirect
golang.org/x/sync v0.10.0 // indirect
golang.org/x/text v0.21.0 // indirect
golang.org/x/time v0.8.0 // indirect
golang.org/x/tools v0.28.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250102185135-69823020774d // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d // indirect
golang.org/x/time v0.9.0 // indirect
golang.org/x/tools v0.29.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 // indirect
google.golang.org/grpc v1.69.2 // indirect
google.golang.org/protobuf v1.36.1 // indirect
google.golang.org/protobuf v1.36.2 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect
)
@@ -146,5 +146,5 @@ require (
github.com/stretchr/testify v1.10.0
github.com/theupdateframework/notary v0.7.0 // indirect
github.com/xeipuuv/gojsonpointer v0.0.0-20190905194746-02993c407bfb // indirect
golang.org/x/sys v0.28.0
golang.org/x/sys v0.29.0
)
go.sum | 28
@@ -131,6 +131,7 @@ github.com/cenkalti/backoff/v4 v4.1.1/go.mod h1:scbssz8iZGpm3xbr14ovlUdkxfGXNInq
github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
@@ -519,6 +520,7 @@ github.com/grpc-ecosystem/go-grpc-middleware v1.0.1-0.20190118093823-f849b5445de
github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk=
github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY=
github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1 h1:VNqngBF40hVlDloBruUehVYC3ArSgIyScOAyMRqBxRg=
github.com/grpc-ecosystem/grpc-gateway/v2 v2.25.1/go.mod h1:RBRO7fro65R6tjKzYgLAFo0t1QEXY1Dp+i/bvpRiqiQ=
@@ -673,6 +675,8 @@ github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcY
github.com/moby/term v0.0.0-20200312100748-672ec06f55cd/go.mod h1:DdlQx2hp0Ss5/fLikoLlEeIYiATotOjgB//nb973jeo=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -806,6 +810,7 @@ github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6L
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/russross/blackfriday v1.6.0 h1:KqfZb0pUVN2lYqZUYRddxF4OR8ZMURnJIG5Y3VRLtww=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
@@ -954,6 +959,8 @@ go.opentelemetry.io/otel/trace v1.33.0/go.mod h1:uIcdVUZMpTAmz0tI1z04GoVSezK37Cb
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.opentelemetry.io/proto/otlp v1.4.0 h1:TA9WRvW6zMwP+Ssb6fLoUIuirti1gGbP28GcKG1jgeg=
go.opentelemetry.io/proto/otlp v1.4.0/go.mod h1:PPBWZIP98o2ElSqI35IHfu7hIhSwvc5N38Jw8pXuGFY=
go.opentelemetry.io/proto/otlp v1.5.0 h1:xJvq7gMzB31/d406fB8U5CBdyQGw4P399D1aQWU/3i4=
go.opentelemetry.io/proto/otlp v1.5.0/go.mod h1:keN8WnHxOy8PG0rQZjJJ5A2ebUoafqWp0eVQ4yIXvJ4=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/goleak v1.3.0 h1:2K3zAYmnTNqV73imy9J1T3WC+gmCePx2hEGkimedGto=
@@ -980,6 +987,8 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -992,6 +1001,8 @@ golang.org/x/exp v0.0.0-20200207192155-f17229e696bd/go.mod h1:J/WKrq2StrnmMY6+EH
golang.org/x/exp v0.0.0-20200224162631-6cc2880d07d6/go.mod h1:3jZMyOhIsHpP37uCMkUooju7aAi5cS1Q23tOzKc+0MU=
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329 h1:9kj3STMvgqy3YA4VQXBrN7925ICMxD5wzMRcgA30588=
golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329/go.mod h1:qj5a5QZpwLU2NLQudwIN5koi3beDhSAlJwa67PuM98c=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8 h1:yqrTHse8TCMW1M1ZCP+VAR/l0kKxwaAIqN/il7x4voA=
golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8/go.mod h1:tujkw807nyEEAamNbDrEGzRav+ilXA7PCRAd6xsmwiU=
golang.org/x/image v0.0.0-20190227222117-0694c2d4d067/go.mod h1:kZ7UVZpmo3dzQBMxlp+ypCbDeSB+sBbTgSJuh5dn5js=
golang.org/x/image v0.0.0-20190802002840-cff245a6509b/go.mod h1:FeLwcggjj3mMvU+oOTbSwawSJRM1uh48EjtB4UJZlP0=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
@@ -1059,6 +1070,8 @@ golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qx
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -1159,11 +1172,15 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -1183,6 +1200,8 @@ golang.org/x/time v0.0.0-20200416051211-89c76fbcd5d1/go.mod h1:tRJNPiyCQ0inRvYxb
golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg=
golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20181011042414-1f849cf54d09/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -1230,6 +1249,8 @@ golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8=
golang.org/x/tools v0.28.0/go.mod h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw=
golang.org/x/tools v0.29.0 h1:Xx0h3TtM9rzQpQuR4dKLrdglAmCEN5Oi+P74JdhdzXE=
golang.org/x/tools v0.29.0/go.mod h1:KMQVMRsVxU6nHCFXrBPhDB8XncLNLM0lIy/F14RP588=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1280,8 +1301,12 @@ google.golang.org/genproto v0.0.0-20200527145253-8367513e4ece/go.mod h1:jDfRM7Fc
google.golang.org/genproto v0.0.0-20201110150050-8816d57aaa9a/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto/googleapis/api v0.0.0-20250102185135-69823020774d h1:H8tOf8XM88HvKqLTxe755haY6r1fqqzLbEnfrmLXlSA=
google.golang.org/genproto/googleapis/api v0.0.0-20250102185135-69823020774d/go.mod h1:2v7Z7gP2ZUOGsaFyxATQSRoBnKygqVq2Cwnvom7QiqY=
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422 h1:GVIKPyP/kLIyVOgOnTwFOrvQaQUzOzGMCxgFUOEmm24=
google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422/go.mod h1:b6h1vNKhxaSoEI+5jc3PJUCustfli/mRab7295pY7rw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d h1:xJJRGY7TJcvIlpSrN3K6LAWgNFUILlO+OMAqtg9aqnw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422 h1:3UsHvIr4Wc2aW4brOaSCmcxh9ksica6fHEr8P1XhkYw=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422/go.mod h1:3ENsm/5D1mzDyhpzeRi1NR784I0BcofWBoSc5QqqMK4=
google.golang.org/grpc v0.0.0-20160317175043-d3ddb4469d5a/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
@@ -1318,6 +1343,8 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk=
google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/protobuf v1.36.2 h1:R8FeyR1/eLmkutZOM5CWghmo5itiG9z0ktFlTVLuTmU=
google.golang.org/protobuf v1.36.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
@@ -1357,6 +1384,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
@@ -655,19 +655,6 @@ func (a App) WriteRecipeVersion(version string, dryRun bool) error {

splitted := strings.Split(line, ":")

if a.Recipe.Dirty {
dirtyVersion = fmt.Sprintf("%s%s", version, config.DIRTY_DEFAULT)
if strings.Contains(line, dirtyVersion) {
skipped = true
lines = append(lines, line)
continue
}

line = fmt.Sprintf("%s:%s", splitted[0], dirtyVersion)
lines = append(lines, line)
continue
}

line = fmt.Sprintf("%s:%s", splitted[0], version)
lines = append(lines, line)
}
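Note: the removed block handled rewriting the recipe pin in the app's .env file when the checkout was dirty. A minimal sketch of the string surgery involved, assuming `+U` matches config.DIRTY_DEFAULT (the line format is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

const dirtyMarker = "+U" // assumption: matches config.DIRTY_DEFAULT

// pinLine rewrites a "RECIPE=gitea:1.2.3" style line to a new version,
// appending the dirty marker when the checkout has unstaged changes.
func pinLine(line, version string, dirty bool) string {
	parts := strings.Split(line, ":")
	if dirty {
		version = fmt.Sprintf("%s%s", version, dirtyMarker)
	}
	return fmt.Sprintf("%s:%s", parts[0], version)
}

func main() {
	fmt.Println(pinLine("RECIPE=gitea:1.2.3", "1.3.0", true)) // RECIPE=gitea:1.3.0+U
}
```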
@@ -223,16 +223,4 @@ func TestWriteRecipeVersionOverwrite(t *testing.T) {
}

assert.Equal(t, "foo", app.Recipe.EnvVersion)

app.Recipe.Dirty = true
if err := app.WriteRecipeVersion("foo+U", false); err != nil {
t.Fatal(err)
}

app, err = appPkg.GetApp(testPkg.ExpectedAppFiles, testPkg.AppName)
if err != nil {
t.Fatal(err)
}

assert.Equal(t, "foo+U", app.Recipe.EnvVersion)
}
@@ -44,6 +44,16 @@ func SetChaosVersionLabel(compose *composetypes.Config, stackName string, chaosV
}
}

func SetVersionLabel(compose *composetypes.Config, stackName string, version string) {
for _, service := range compose.Services {
if service.Name == "app" {
log.Debugf("set label 'coop-cloud.%s.version' to %v for %s", stackName, version, stackName)
labelKey := fmt.Sprintf("coop-cloud.%s.version", stackName)
service.Deploy.Labels[labelKey] = version
}
}
}

// SetUpdateLabel adds env ENABLE_AUTO_UPDATE as label to enable/disable the
// auto update process for this app. The default if this variable is not set is to disable
// the auto update process.
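Note: the new SetVersionLabel stamps the deployed version onto the `app` service so later commands can read it back from the swarm. A small sketch of the resulting label key, with a made-up example stack name:

```go
package main

import "fmt"

// versionLabelKey mirrors the label naming used by SetVersionLabel above.
func versionLabelKey(stackName string) string {
	return fmt.Sprintf("coop-cloud.%s.version", stackName)
}

func main() {
	fmt.Println(versionLabelKey("gitea_example_com"))
	// Output: coop-cloud.gitea_example_com.version
}
```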
@@ -192,7 +192,7 @@ func TestEnvVarCommentsRemoved(t *testing.T) {

envVar, exists = envSample["SECRET_TEST_PASS_TWO_VERSION"]
if !exists {
t.Fatal("WITH_COMMENT env var should be present in .env.sample")
t.Fatal("SECRET_TEST_PASS_TWO_VERSION env var should be present in .env.sample")
}

if strings.Contains(envVar, "length") {
@@ -4,11 +4,14 @@ import (
"fmt"
"os"
"slices"
"sort"
"strings"

"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
gitPkg "coopcloud.tech/abra/pkg/git"
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/tagcmp"
"github.com/distribution/reference"
"github.com/go-git/go-git/v5"
"github.com/go-git/go-git/v5/plumbing"
@@ -43,6 +46,9 @@ func (r Recipe) Ensure(ctx EnsureContext) error {

if r.EnvVersion != "" && !ctx.IgnoreEnvVersion {
log.Debugf("ensuring env version %s", r.EnvVersion)
if strings.Contains(r.EnvVersion, "+U") {
log.Fatalf("can not redeploy chaos version (%s) without --chaos", r.EnvVersion)
}

if _, err := r.EnsureVersion(r.EnvVersion); err != nil {
return err
@@ -272,19 +278,14 @@ func (r Recipe) EnsureUpToDate() error {
return nil
}

// IsDirty checks whether a recipe is dirty or not. N.B., if you call IsDirty
// from another Recipe method, you should propagate the pointer reference (*).
func (r *Recipe) IsDirty() error {
// IsDirty checks whether a recipe is dirty or not.
func (r *Recipe) IsDirty() (bool, error) {
isClean, err := gitPkg.IsClean(r.Dir)
if err != nil {
return err
return false, err
}

if !isClean {
r.Dirty = true
}

return nil
return !isClean, nil
}

// ChaosVersion constructs a chaos mode recipe version.
@@ -298,8 +299,12 @@ func (r *Recipe) ChaosVersion() (string, error) {

version = formatter.SmallSHA(head.String())

if err := r.IsDirty(); err != nil {
return version, err
dirty, err := r.IsDirty()
if err != nil {
return "", err
}
if dirty {
return fmt.Sprintf("%s%s", version, config.DIRTY_DEFAULT), nil
}

return version, nil
@@ -345,6 +350,18 @@ func (r Recipe) Tags() ([]string, error) {
return tags, err
}

sort.Slice(tags, func(i, j int) bool {
version1, err := tagcmp.Parse(tags[i])
if err != nil {
return false
}
version2, err := tagcmp.Parse(tags[j])
if err != nil {
return false
}
return version1.IsLessThan(version2)
})

log.Debugf("detected %s as tags for recipe %s", strings.Join(tags, ", "), r.Name)

return tags, nil
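Note: Tags() now sorts with tagcmp before returning, so callers like getLatestVersionOrCommit can take versions[len(versions)-1] as the newest release. Semantic comparison matters here because lexical order gets multi-digit components wrong; a standalone sketch using only the standard library (semverLess is a hand-rolled stand-in for tagcmp's IsLessThan, good enough for plain x.y.z tags):

```go
package main

import (
	"fmt"
	"sort"
	"strconv"
	"strings"
)

// semverLess compares dotted numeric versions component by component.
func semverLess(a, b string) bool {
	pa, pb := strings.Split(a, "."), strings.Split(b, ".")
	for i := 0; i < len(pa) && i < len(pb); i++ {
		na, _ := strconv.Atoi(pa[i])
		nb, _ := strconv.Atoi(pb[i])
		if na != nb {
			return na < nb
		}
	}
	return len(pa) < len(pb)
}

func main() {
	tags := []string{"1.10.0", "1.2.0", "1.9.1"}

	sort.Strings(tags)
	fmt.Println(tags) // [1.10.0 1.2.0 1.9.1] -- lexical order is wrong for versions

	sort.Slice(tags, func(i, j int) bool { return semverLess(tags[i], tags[j]) })
	fmt.Println(tags) // [1.2.0 1.9.1 1.10.0]
}
```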
@@ -15,10 +15,6 @@ func TestIsDirty(t *testing.T) {
t.Fatal(err)
}

if err := r.IsDirty(); err != nil {
t.Fatal(err)
}

assert.False(t, r.Dirty)

fpath := filepath.Join(r.Dir, "foo.txt")
@@ -31,9 +27,10 @@ func TestIsDirty(t *testing.T) {
os.Remove(fpath)
})

if err := r.IsDirty(); err != nil {
dirty, err := r.IsDirty()
if err != nil {
t.Fatal(err)
}

assert.True(t, r.Dirty)
assert.True(t, dirty)
}
@@ -12,6 +12,8 @@ import (
"strconv"
"strings"

"github.com/go-git/go-git/v5"

"coopcloud.tech/abra/pkg/catalogue"
"coopcloud.tech/abra/pkg/config"
"coopcloud.tech/abra/pkg/formatter"
@@ -20,7 +22,6 @@ import (
"coopcloud.tech/abra/pkg/log"
"coopcloud.tech/abra/pkg/web"
"coopcloud.tech/tagcmp"
"github.com/go-git/go-git/v5"
)

// RecipeCatalogueURL is the only current recipe catalogue available.
@@ -119,22 +120,9 @@ type Features struct {
SSO string `json:"sso"`
}

func GetEnvVersionRaw(name string) (string, error) {
var version string

if strings.Contains(name, ":") {
split := strings.Split(name, ":")
if len(split) > 2 {
return version, fmt.Errorf("version seems invalid: %s", name)
}
version = split[1]
}

return version, nil
}

func Get(name string) Recipe {
version := ""
versionRaw := ""
if strings.Contains(name, ":") {
split := strings.Split(name, ":")
if len(split) > 2 {
@@ -143,6 +131,7 @@ func Get(name string) Recipe {
name = split[0]

version = split[1]
versionRaw = version
if strings.HasSuffix(version, config.DIRTY_DEFAULT) {
version = strings.Replace(split[1], config.DIRTY_DEFAULT, "", 1)
log.Debugf("removed dirty suffix from .env version: %s -> %s", split[1], version)
@@ -167,11 +156,12 @@ func Get(name string) Recipe {
dir := path.Join(config.RECIPES_DIR, escapeRecipeName(name))

r := Recipe{
Name: name,
EnvVersion: version,
Dir: dir,
GitURL: gitURL,
SSHURL: sshURL,
Name: name,
EnvVersion: version,
EnvVersionRaw: versionRaw,
Dir: dir,
GitURL: gitURL,
SSHURL: sshURL,

ComposePath: path.Join(dir, "compose.yml"),
ReadmePath: path.Join(dir, "README.md"),
@@ -179,20 +169,23 @@ func Get(name string) Recipe {
AbraShPath: path.Join(dir, "abra.sh"),
}

if err := r.IsDirty(); err != nil && !errors.Is(err, git.ErrRepositoryNotExists) {
dirty, err := r.IsDirty()
if err != nil && !errors.Is(err, git.ErrRepositoryNotExists) {
log.Fatalf("failed to check git status of %s: %s", r.Name, err)
}
r.Dirty = dirty

return r
}

type Recipe struct {
Name string
EnvVersion string
Dirty bool // NOTE(d1): git terminology for unstaged changes
Dir string
GitURL string
SSHURL string
Name string
EnvVersion string
EnvVersionRaw string
Dirty bool // NOTE(d1): git terminology for unstaged changes
Dir string
GitURL string
SSHURL string

ComposePath string
ReadmePath string
@@ -34,6 +34,7 @@ func TestGet(t *testing.T) {
recipe: Recipe{
Name: "foo",
EnvVersion: "1.2.3",
EnvVersionRaw: "1.2.3",
Dir: path.Join(cfg.GetAbraDir(), "/recipes/foo"),
GitURL: "https://git.coopcloud.tech/coop-cloud/foo.git",
SSHURL: "ssh://git@git.coopcloud.tech:2222/coop-cloud/foo.git",
@@ -61,6 +62,22 @@ func TestGet(t *testing.T) {
recipe: Recipe{
Name: "mygit.org/myorg/cool-recipe",
EnvVersion: "1.2.4",
EnvVersionRaw: "1.2.4",
Dir: path.Join(cfg.GetAbraDir(), "/recipes/mygit_org_myorg_cool-recipe"),
GitURL: "https://mygit.org/myorg/cool-recipe.git",
SSHURL: "ssh://git@mygit.org/myorg/cool-recipe.git",
ComposePath: path.Join(cfg.GetAbraDir(), "recipes/mygit_org_myorg_cool-recipe/compose.yml"),
ReadmePath: path.Join(cfg.GetAbraDir(), "recipes/mygit_org_myorg_cool-recipe/README.md"),
SampleEnvPath: path.Join(cfg.GetAbraDir(), "recipes/mygit_org_myorg_cool-recipe/.env.sample"),
AbraShPath: path.Join(cfg.GetAbraDir(), "recipes/mygit_org_myorg_cool-recipe/abra.sh"),
},
},
{
name: "mygit.org/myorg/cool-recipe:1e83340e+U",
recipe: Recipe{
Name: "mygit.org/myorg/cool-recipe",
EnvVersion: "1e83340e",
EnvVersionRaw: "1e83340e+U",
Dir: path.Join(cfg.GetAbraDir(), "/recipes/mygit_org_myorg_cool-recipe"),
GitURL: "https://mygit.org/myorg/cool-recipe.git",
SSHURL: "ssh://git@mygit.org/myorg/cool-recipe.git",
@@ -105,16 +122,3 @@ func TestGetVersionLabelLocalDoesNotUseTimeoutLabel(t *testing.T) {
assert.NotEqual(t, label, defaultTimeoutLabel)
}
}

func TestDirtyMarkerRemoved(t *testing.T) {
r := Get("abra-test-recipe:1e83340e+U")
assert.Equal(t, "1e83340e", r.EnvVersion)
}

func TestGetEnvVersionRaw(t *testing.T) {
v, err := GetEnvVersionRaw("abra-test-recipe:1e83340e+U")
if err != nil {
t.Fatal(err)
}
assert.Equal(t, "1e83340e+U", v)
}
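Note: EnvVersionRaw keeps the version exactly as written in the .env pin, while EnvVersion strips the dirty suffix; the chaos checks earlier in this changeset rely on the raw form. A small sketch of the split, assuming `+U` matches config.DIRTY_DEFAULT:

```go
package main

import (
	"fmt"
	"strings"
)

const dirtyMarker = "+U" // assumption: matches config.DIRTY_DEFAULT

func main() {
	pin := "abra-test-recipe:1e83340e+U"

	raw := strings.SplitN(pin, ":", 2)[1]         // "1e83340e+U" -> EnvVersionRaw
	clean := strings.TrimSuffix(raw, dirtyMarker) // "1e83340e"   -> EnvVersion

	fmt.Println(raw, clean)
}
```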
@@ -50,6 +50,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -62,6 +65,9 @@ teardown(){
run $ABRA app check "$TEST_APP_DOMAIN" --chaos
assert_success

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -53,6 +53,9 @@ teardown(){
assert_failure
assert_output --partial 'locally unstaged changes'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -66,6 +69,9 @@ teardown(){
assert_success
assert_output --partial 'baz'

assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_equal "$(_git_status)" "?? foo"

run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@ -24,6 +24,9 @@ teardown(){
|
||||
_rm_remote "/etc/*.txt"
|
||||
|
||||
_rm "$BATS_TMPDIR/mydir"
|
||||
|
||||
run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
}
|
||||
|
||||
@test "validate app argument" {
|
||||
@ -34,6 +37,42 @@ teardown(){
|
||||
assert_failure
|
||||
}
|
||||
|
||||
@test "bail if unstaged changes and no --chaos" {
|
||||
_mkdir "$BATS_TMPDIR/mydir"
|
||||
_mkfile "$BATS_TMPDIR/mydir/myfile.txt" "foo"
|
||||
|
||||
run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
assert_success
|
||||
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
|
||||
run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/mydir" app:/etc
|
||||
assert_failure
|
||||
|
||||
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
assert_equal "$(_git_status)" "?? foo"
|
||||
|
||||
run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
}
|
||||
|
||||
@test "do not bail if unstaged changes and --chaos" {
|
||||
_mkdir "$BATS_TMPDIR/mydir"
|
||||
_mkfile "$BATS_TMPDIR/mydir/myfile.txt" "foo"
|
||||
|
||||
run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
assert_success
|
||||
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
|
||||
run $ABRA app cp "$TEST_APP_DOMAIN" "$BATS_TMPDIR/mydir" app:/etc --chaos
|
||||
assert_success
|
||||
|
||||
assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
assert_equal "$(_git_status)" "?? foo"
|
||||
|
||||
run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
|
||||
}
|
||||
|
||||
@test "error if missing src/dest arguments" {
|
||||
run $ABRA app cp "$TEST_APP_DOMAIN"
|
||||
assert_failure
|
||||
|
@@ -21,8 +21,10 @@ setup(){

teardown(){
  _reset_recipe
  _reset_app
  _undeploy_app
  _undeploy_app2 "gitea.$TEST_SERVER"

  _reset_app
  _reset_tags

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
@@ -46,6 +48,9 @@ teardown(){
  assert_success
  assert_output --partial 'foo'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run $ABRA app deploy "$TEST_APP_DOMAIN" --no-input
  assert_failure
  assert_output --partial 'locally unstaged changes'
@@ -62,6 +67,9 @@ teardown(){
  assert_success
  assert_output --partial 'foo'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run $ABRA app deploy "$TEST_APP_DOMAIN" \
    --chaos --no-input --no-converge-checks
  assert_success
@@ -216,19 +224,6 @@ teardown(){
  run $ABRA app deploy "gitea.$TEST_SERVER" --no-input --no-converge-checks
  assert_success
  assert_output --partial "$latestVersion"

  run $ABRA app undeploy "gitea.$TEST_SERVER" --no-input
  assert_success

  run $ABRA app secret remove "gitea.$TEST_SERVER" --all --no-input
  assert_success

  run $ABRA app volume remove "gitea.$TEST_SERVER" --no-input
  assert_success

  run $ABRA app remove "gitea.$TEST_SERVER" --no-input
  assert_success
  assert_not_exists "$ABRA_DIR/servers/$TEST_SERVER/gitea.$TEST_SERVER.env"
}

# bats test_tags=slow
@@ -423,3 +418,12 @@ teardown(){
  assert_success
  assert_output --partial "$latestRelease"
}

# bats test_tags=slow
@test "no chaos version label if no chaos" {
  _deploy_app

  run $ABRA app labels "$TEST_APP_DOMAIN" --no-input
  assert_success
  refute_output --regexp "coop-cloud.abra-test-recipe.$TEST_SERVER.chaos-version"
}
@@ -54,13 +54,21 @@ teardown(){
}

# bats test_tags=slow
@test "chaos commit written to env" {
@test "deploy commit written to env and redeploy keeps that version" {
  run $ABRA app deploy "$TEST_APP_DOMAIN" "1e83340e" --no-input --no-converge-checks
  assert_success

  run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success

  run $ABRA app deploy "$TEST_APP_DOMAIN" \
    --force --no-input --no-converge-checks
  assert_success

  run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}

# bats test_tags=slow
@@ -98,12 +106,15 @@ teardown(){
}

# bats test_tags=slow
@test "deploy overwrites chaos deploy" {
  run $ABRA app deploy "$TEST_APP_DOMAIN" "1e83340e" \
    --no-input --no-converge-checks
@test "takes deployed version when no .env version is present " {
  run $ABRA app deploy "$TEST_APP_DOMAIN" "0.1.0+1.20.0" --no-input --no-converge-checks --ignore-env-version
  assert_success

  run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
  run grep -q "TYPE=$TEST_RECIPE:0.1.0+1.20.0" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success

  run sed -i 's/TYPE=abra-test-recipe:.*/TYPE=abra-test-recipe/g' \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success

@@ -111,7 +122,7 @@ teardown(){
    --force --no-input --no-converge-checks
  assert_success

  run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
  run grep -q "TYPE=$TEST_RECIPE:0.1.0+1.20.0" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_failure
  assert_success
}
@@ -20,8 +20,8 @@ setup(){

teardown(){
  _reset_recipe
  _reset_app
  _undeploy_app
  _reset_app
  _reset_tags

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
@@ -37,17 +37,10 @@ teardown(){
    --no-input --no-converge-checks
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*N/A'
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.* ' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*N/A'
  assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"
  assert_output --partial 'NEW DEPLOY OVERVIEW'
  assert_output --partial 'CURRENT DEPLOYMENT N/A'
  assert_output --partial 'ENV VERSION N/A'
  assert_output --partial "NEW DEPLOYMENT ${latestRelease}"

  run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -61,17 +54,10 @@ teardown(){
    --no-input --no-converge-checks
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*N/A'
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.* ' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
  assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"
  assert_output --partial 'NEW DEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT N/A"
  assert_output --partial "ENV VERSION ${latestRelease}"
  assert_output --partial "NEW DEPLOYMENT ${latestRelease}"

  run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -90,17 +76,10 @@ teardown(){
    --no-input --no-converge-checks
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*N/A'
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.*' + "0.1.1+1.20.2"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "0.1.1+1.20.2"
  assert_output --regexp 'NEW VERSION.*' + "0.1.1+1.20.2"
  assert_output --partial 'NEW DEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT N/A"
  assert_output --partial "ENV VERSION 0.1.1+1.20.2"
  assert_output --partial "NEW DEPLOYMENT 0.1.1+1.20.2"

  run grep -q "TYPE=$TEST_RECIPE:0.1.1+1.20.2" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -120,17 +99,10 @@ teardown(){
    --no-input --no-converge-checks --ignore-env-version
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*N/A'
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "0.1.1+1.20.2"
  assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"
  assert_output --partial 'NEW DEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT N/A"
  assert_output --partial "ENV VERSION 0.1.1+1.20.2"
  assert_output --partial "NEW DEPLOYMENT ${latestRelease}"

  run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -153,17 +125,10 @@ teardown(){
    --no-input --no-converge-checks --chaos
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*' + "${headHash:0:8}+U"

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
  assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}+U"
  assert_output --partial 'CHAOS DEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT ${latestRelease}"
  assert_output --partial "ENV VERSION ${latestRelease}"
  assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}+U"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
@@ -173,7 +138,7 @@ teardown(){
  assert_success
}

@test "chaos deploy then force deploy" {
@test "can not redeploy chaos version without --chaos" {
  headHash=$(_get_head_hash)
  latestRelease=$(_latest_release)

@@ -189,27 +154,12 @@ teardown(){
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

  run $ABRA app deploy "$TEST_APP_DOMAIN" \
    --no-input --no-converge-checks --force
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*' + "${headHash:0:8}+U"

  # new deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "${headHash:0:8}+U"
  assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"

  run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
    --no-input --no-converge-checks --force --debug
  assert_failure
  assert_output --regexp 'can not redeploy chaos version .*' + "${headHash:0:8}+U"
}

@test "deploy then force chaos commit deploy" {
@test "deploy then force commit deploy" {
  headHash=$(_get_head_hash)
  latestRelease=$(_latest_release)

@@ -225,17 +175,10 @@ teardown(){
    --no-input --no-converge-checks --force
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
  assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}"
  assert_output --partial 'DEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT ${latestRelease}"
  assert_output --partial "ENV VERSION ${latestRelease}"
  assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}"

  run grep -q "TYPE=$TEST_RECIPE:${headHash:0:8}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -250,17 +193,10 @@ teardown(){
    --no-input --no-converge-checks --chaos
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*N/A'
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
  assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}"
  assert_output --partial 'NEW DEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT N/A"
  assert_output --partial "ENV VERSION ${latestRelease}"
  assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}"

  run bash -c 'echo "unstaged changes" >> "$ABRA_DIR/recipes/$TEST_RECIPE/foo"'
  assert_success
@@ -270,17 +206,28 @@ teardown(){
    --no-input --no-converge-checks --chaos
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"
  assert_output --partial 'CHAOS DEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}"
  assert_output --partial "ENV VERSION ${headHash:0:8}"
  assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}+U"

  # new deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*' + "${headHash:0:8}+U"
  run $ABRA app deploy "$TEST_APP_DOMAIN" \
    --no-input --no-converge-checks --chaos
  assert_success

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
  assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}+U"
  assert_output --partial 'CHAOS DEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}+U"
  assert_output --partial "ENV VERSION ${headHash:0:8}+U"
  assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}+U"

  run $ABRA app deploy "$TEST_APP_DOMAIN" \
    --no-input --no-converge-checks --chaos
  assert_success

  assert_output --partial 'CHAOS DEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}+U"
  assert_output --partial "ENV VERSION ${headHash:0:8}+U"
  assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}+U"

  run grep -q "TYPE=$TEST_RECIPE:${headHash:0:8}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -302,19 +249,8 @@ teardown(){
    --no-input --no-converge-checks --force
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"

  # new deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "${headHash:0:8}"
  assert_output --regexp 'NEW VERSION.*' + "${latestRelease}"

  run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
  assert_output --partial 'REDEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}"
  assert_output --partial "ENV VERSION ${headHash:0:8}"
  assert_output --partial "NEW DEPLOYMENT ${headHash:0:8}"
}
@@ -20,8 +20,11 @@ setup(){

teardown(){
  _reset_recipe
  _reset_app
  _undeploy_app
  _reset_app

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@test "validate app argument" {
@@ -41,6 +44,16 @@ teardown(){
}

@test "show env version despite --chaos" {
  run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_success
  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

  run $ABRA app env "$TEST_APP_DOMAIN"
  assert_success

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@@ -21,8 +21,8 @@ setup(){

teardown(){
  _reset_recipe
  _reset_app
  _undeploy_app
  _reset_app
  _reset_tags

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
@@ -46,6 +46,9 @@ teardown(){
  assert_success
  assert_output --partial 'foo'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run $ABRA app labels "$TEST_APP_DOMAIN" --no-input
  assert_failure
}
@@ -59,6 +62,9 @@ teardown(){
  assert_success
  assert_output --partial 'foo'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run $ABRA app labels "$TEST_APP_DOMAIN" --chaos
  assert_success
}
@@ -20,6 +20,10 @@ setup(){
teardown(){
  _rm_app
  _reset_recipe
  _reset_tags

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}

@test "create new app" {
@@ -47,25 +51,22 @@ teardown(){
  assert_success
  assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"

  assert_equal $(_get_tag_hash 0.3.0+1.21.0) $(_get_current_hash)

  run grep -q "TYPE=$TEST_RECIPE:0.3.0+1.21.0" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}

@test "create new app with chaos commit" {
  run $ABRA app new "$TEST_RECIPE" 1e83340e \
@test "create new app with version commit" {
  tagHash=$(_get_tag_hash "0.3.0+1.21.0")

  run $ABRA app new "$TEST_RECIPE" "$tagHash" \
    --no-input \
    --server "$TEST_SERVER" \
    --domain "$TEST_APP_DOMAIN"
  assert_success
  assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"

  currentHash=$(_get_current_hash)
  assert_equal 1e83340e ${currentHash:0:8}

  run grep -q "TYPE=$TEST_RECIPE:1e83340e" \
  run grep -q "TYPE=$TEST_RECIPE:${tagHash}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}
@@ -101,6 +102,9 @@ teardown(){
  assert_failure
  assert_output --partial 'locally unstaged changes'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -122,6 +126,13 @@ teardown(){
  assert_success
  assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status
  assert_success
  assert_output --partial 'foo'

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -167,6 +178,8 @@ teardown(){

# bats test_tags=slow
@test "generate secrets" {
  latestRelease=$(_latest_release)

  run $ABRA app new "$TEST_RECIPE" \
    --no-input \
    --server "$TEST_SERVER" \
@@ -178,4 +191,64 @@ teardown(){
  run $ABRA app secret ls "$TEST_APP_DOMAIN"
  assert_success
  assert_output --partial 'test_pass_one'

  run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}

# bats test_tags=slow
@test "app new from chaos recipe" {
  currentHash=$(_get_current_hash)
  latestRelease=$(_latest_release)

  run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_success
  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

  run $ABRA app new "$TEST_RECIPE" \
    --no-input \
    --server "$TEST_SERVER" \
    --domain "$TEST_APP_DOMAIN" \
    --secrets \
    --chaos
  assert_success
  assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_output --partial "version: ${currentHash:0:8}"
  assert_output --partial "chaos: ${currentHash:0:8}"

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run grep -q "TYPE=$TEST_RECIPE:${currentHash:0:8}+U" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}

# bats test_tags=slow
@test "app new, no releases, from chaos recipe" {
  currentHash=$(_get_current_hash)
  _remove_tags

  run bash -c "echo foo >> $ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_success
  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"

  run $ABRA app new "$TEST_RECIPE" \
    --no-input \
    --server "$TEST_SERVER" \
    --domain "$TEST_APP_DOMAIN" \
    --secrets \
    --chaos
  assert_success
  assert_exists "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_output --partial "version: ${currentHash:0:8}"
  assert_output --partial "chaos: ${currentHash:0:8}"

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run grep -q "TYPE=$TEST_RECIPE:${currentHash:0:8}+U" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}
@@ -55,6 +55,9 @@ teardown(){
  assert_failure
  assert_output --partial 'locally unstaged changes'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -70,6 +73,9 @@ teardown(){
  run $ABRA app ps --chaos "$TEST_APP_DOMAIN"
  assert_success

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -117,6 +123,8 @@ teardown(){
@test "show ps report" {
  _deploy_app

  _ensure_env_version "$(_latest_release)"

  run $ABRA app ps "$TEST_APP_DOMAIN"
  assert_success
  assert_output --partial 'app'
@@ -19,6 +19,7 @@ setup(){
}

teardown(){
  _reset_app
  _undeploy_app
  _reset_recipe
}
@@ -152,7 +153,7 @@ teardown(){
}

# bats test_tags=slow
@test "rollback chaos deployment" {
@test "rollback chaos deployment is not possible" {
  tagHash=$(_get_tag_hash "0.2.0+1.21.0")
  run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" checkout "$tagHash"
  assert_success
@@ -162,17 +163,8 @@ teardown(){
  assert_output --partial "${tagHash:0:8}"

  run $ABRA app rollback "$TEST_APP_DOMAIN" "0.1.1+1.20.2" --no-input --no-converge-checks
  assert_success
  assert_output --partial "0.1.1+1.20.2"
  assert_output --partial "${tagHash:0:8}"

  run $ABRA app rollback "$TEST_APP_DOMAIN" "0.1.0+1.20.0" --no-input --no-converge-checks
  assert_success
  assert_output --partial "0.1.0+1.20.0"

  tagHash=$(_get_tag_hash "0.1.1+1.20.2")
  refute_output --partial "${tagHash:0:8}"
  assert_output --partial "false"
  assert_failure
  assert_output --partial 'current deployment' + "${tagHash:0:8}" + 'is not a known version'
}

# bats test_tags=slow
@@ -185,3 +177,16 @@ teardown(){
  assert_failure
  assert_output --partial "not a known version"
}

# bats test_tags=slow
@test "no chaos version label if no chaos" {
  _deploy_app

  run $ABRA app rollback "$TEST_APP_DOMAIN" \
    --no-input --no-converge-checks
  assert_success

  run $ABRA app labels "$TEST_APP_DOMAIN" --no-input
  assert_success
  refute_output --regexp "coop-cloud.abra-test-recipe.$TEST_SERVER.chaos-version"
}
@@ -20,10 +20,11 @@ setup(){

teardown(){
  _undeploy_app
  _reset_app
  _reset_recipe
}

@test "deploy then rollback" {
  run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
    --no-input --no-converge-checks
  assert_success
@@ -32,24 +33,17 @@ teardown(){
    --no-input --no-converge-checks
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.*' + "0.1.0+1.20.0"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "0.2.0+1.21.0"
  assert_output --regexp 'NEW VERSION.*' + "0.1.0+1.20.0"
  assert_output --partial 'DOWNGRADE OVERVIEW'
  assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
  assert_output --partial 'ENV VERSION 0.2.0+1.21.0'
  assert_output --partial 'NEW DEPLOYMENT 0.1.0+1.20.0'

  run grep -q "TYPE=$TEST_RECIPE:0.1.0+1.20.0" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}

@test "force rollback" {
  run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
    --no-input --no-converge-checks
  assert_success
@@ -58,19 +52,33 @@ teardown(){
    --no-input --no-converge-checks --force
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "0.2.0+1.21.0"
  assert_output --regexp 'NEW VERSION.*' + "0.2.0+1.21.0"
  assert_output --partial 'REDEPLOY OVERVIEW'
  assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
  assert_output --partial 'ENV VERSION 0.2.0+1.21.0'
  assert_output --partial 'NEW DEPLOYMENT 0.2.0+1.21.0'

  run grep -q "TYPE=$TEST_RECIPE:0.2.0+1.21.0" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}

@test "app rollback no .env version" {
  run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
    --no-input --no-converge-checks
  assert_success

  _wipe_env_version

  run $ABRA app rollback "$TEST_APP_DOMAIN" "0.1.0+1.20.0" \
    --no-input --no-converge-checks
  assert_success

  assert_output --partial 'DOWNGRADE OVERVIEW'
  assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
  assert_output --partial 'ENV VERSION N/A'
  assert_output --partial 'NEW DEPLOYMENT 0.1.0+1.20.0'

  run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}
@@ -41,6 +41,11 @@ teardown(){

  run $ABRA app secret generate "$TEST_APP_DOMAIN"
  assert_failure
  assert_output --partial 'missing arguments'

  run $ABRA app secret generate "$TEST_APP_DOMAIN" test_pass_one
  assert_failure
  assert_output --partial 'missing arguments'

  run $ABRA app secret generate "$TEST_APP_DOMAIN" testSecret testVersion --all
  assert_failure
@@ -131,6 +136,9 @@ teardown(){
  assert_failure
  assert_output --partial 'locally unstaged changes'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -271,6 +279,9 @@ teardown(){
  assert_failure
  assert_output --partial 'locally unstaged changes'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -319,6 +330,9 @@ teardown(){
  assert_failure
  assert_output --partial 'locally unstaged changes'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -92,9 +92,6 @@ teardown(){

  run $ABRA app undeploy "$TEST_APP_DOMAIN" --no-input
  assert_success

  # NOTE(d1): ensure not chaos undeploy
  assert_output --partial 'false'
}

# bats test_tags=slow

@@ -33,13 +33,10 @@ teardown(){
  run $ABRA app undeploy "$TEST_APP_DOMAIN" --no-input
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "0.1.0+1.20.0"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "0.1.0+1.20.0"
  assert_output --regexp 'NEW VERSION.*' + "0.1.0+1.20.0"
  assert_output --partial 'UNDEPLOY OVERVIEW'
  assert_output --partial 'CURRENT DEPLOYMENT 0.1.0+1.20.0'
  assert_output --partial 'ENV VERSION 0.1.0+1.20.0'
  assert_output --partial 'NEW DEPLOYMENT N/A'

  run grep -q "TYPE=$TEST_RECIPE:0.1.0+1.20.0" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -57,13 +54,10 @@ teardown(){
  run $ABRA app undeploy "$TEST_APP_DOMAIN" --no-input
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*' + "${headHash:0:8}"

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
  assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}"
  assert_output --partial 'UNDEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}"
  assert_output --partial "ENV VERSION ${headHash:0:8}"
  assert_output --partial 'NEW DEPLOYMENT N/A'

  run grep -q "TYPE=$TEST_RECIPE:${headHash:0:8}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -72,7 +66,6 @@ teardown(){

@test "chaos deploy with unstaged commits and undeploy" {
  headHash=$(_get_head_hash)
  latestRelease=$(_latest_release)

  run bash -c 'echo "unstaged changes" >> "$ABRA_DIR/recipes/$TEST_RECIPE/foo"'
  assert_success
@@ -85,13 +78,10 @@ teardown(){
  run $ABRA app undeploy "$TEST_APP_DOMAIN" --no-input
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "${latestRelease}"
  assert_output --regexp 'CHAOS.*' + "${headHash:0:8}+U"

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "${latestRelease}"
  assert_output --regexp 'NEW VERSION.*' + "${headHash:0:8}+U"
  assert_output --partial 'UNDEPLOY OVERVIEW'
  assert_output --partial "CURRENT DEPLOYMENT ${headHash:0:8}+U"
  assert_output --partial "ENV VERSION ${headHash:0:8}+U"
  assert_output --partial 'NEW DEPLOYMENT N/A'

  run grep -q "TYPE=$TEST_RECIPE:${headHash:0:8}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
@@ -205,7 +205,7 @@ teardown(){
}

# bats test_tags=slow
@test "upgrade chaos deployment" {
@test "upgrade commit deployment not possible" {
  tagHash=$(_get_tag_hash "0.1.0+1.20.0")
  run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" checkout "$tagHash"
  assert_success
@@ -215,17 +215,8 @@ teardown(){
  assert_output --partial "${tagHash:0:8}"

  run $ABRA app upgrade "$TEST_APP_DOMAIN" "0.1.1+1.20.2" --no-input --no-converge-checks
  assert_success
  assert_output --partial "0.1.1+1.20.2"
  assert_output --partial "${tagHash:0:8}"

  run $ABRA app upgrade "$TEST_APP_DOMAIN" "0.2.0+1.21.0" --no-input --no-converge-checks
  assert_success
  assert_output --partial "0.2.0+1.21.0"

  tagHash=$(_get_tag_hash "0.1.1+1.20.2")
  refute_output --partial "${tagHash:0:8}"
  assert_output --partial "false"
  assert_failure
  assert_output --partial "not a known version"
}

@test "chaos commit upgrade not possible" {
@@ -239,3 +230,16 @@ teardown(){
  assert_failure
  assert_output --partial "not a known version"
}

# bats test_tags=slow
@test "no chaos version label if no chaos" {
  _deploy_app

  run $ABRA app upgrade "$TEST_APP_DOMAIN" \
    --no-input --no-converge-checks
  assert_success

  run $ABRA app labels "$TEST_APP_DOMAIN" --no-input
  assert_success
  refute_output --regexp "coop-cloud.abra-test-recipe.$TEST_SERVER.chaos-version"
}
@@ -31,24 +31,17 @@ teardown(){
    --no-input --no-converge-checks
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "0.1.0+1.20.0"
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "0.1.0+1.20.0"
  assert_output --regexp 'NEW VERSION.*' + "0.2.0+1.21.0"
  assert_output --partial 'UPGRADE OVERVIEW'
  assert_output --partial 'CURRENT DEPLOYMENT 0.1.0+1.20.0'
  assert_output --partial 'ENV VERSION 0.1.0+1.20.0'
  assert_output --partial 'NEW DEPLOYMENT 0.2.0+1.21.0'

  run grep -q "TYPE=$TEST_RECIPE:0.2.0+1.21.0" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}

@test "force upgrade" {
  run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
    --no-input --no-converge-checks
  assert_success
@@ -57,19 +50,35 @@ teardown(){
    --no-input --no-converge-checks --force
  assert_success

  # current deployment
  assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
  assert_output --regexp 'CHAOS.*false'

  # new deployment
  assert_output --regexp 'VERSION.*' + "0.2.0+1.21.0"
  assert_output --regexp 'CHAOS.*false'

  # env version
  assert_output --regexp 'CURRENT VERSION.*' + "0.2.0+1.21.0"
  assert_output --regexp 'NEW VERSION.*' + "0.2.0+1.21.0"
  assert_output --partial 'REDEPLOY OVERVIEW'
  assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
  assert_output --partial 'ENV VERSION 0.2.0+1.21.0'
  assert_output --partial 'NEW DEPLOYMENT 0.2.0+1.21.0'

  run grep -q "TYPE=$TEST_RECIPE:0.2.0+1.21.0" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}

@test "app upgrade no .env version" {
  latestRelease=$(_latest_release)

  run $ABRA app deploy "$TEST_APP_DOMAIN" "0.2.0+1.21.0" \
    --no-input --no-converge-checks
  assert_success

  _wipe_env_version

  run $ABRA app upgrade "$TEST_APP_DOMAIN" \
    --no-input --no-converge-checks --force
  assert_success

  assert_output --partial 'UPGRADE OVERVIEW'
  assert_output --partial 'CURRENT DEPLOYMENT 0.2.0+1.21.0'
  assert_output --partial 'ENV VERSION N/A'
  assert_output --partial 'NEW DEPLOYMENT 0.3.1+1.21.0'

  run grep -q "TYPE=$TEST_RECIPE:${latestRelease}" \
    "$ABRA_DIR/servers/$TEST_SERVER/$TEST_APP_DOMAIN.env"
  assert_success
}
@@ -30,6 +30,15 @@ _undeploy_app() {
  assert_output --partial 'unknown'
}

_undeploy_app2() {
  run $ABRA app undeploy "$1" --no-input

  run $ABRA app ls --server "$TEST_SERVER" --status
  assert_success
  assert_output --partial "$1"
  assert_output --partial 'unknown'
}

_rm_app() {
  # NOTE(d1): not asserting outcomes on teardown here since some might fail
  # depending on what the test created. all commands run through anyway

@@ -38,6 +38,8 @@ _set_git_author() {
}

_git_commit() {
  _set_git_author

  run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" add .
  assert_success

@@ -60,3 +62,7 @@ _get_current_hash() {
_get_n_hash() {
  echo $(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" show -s --format="%H" "HEAD~$1")
}

_git_status() {
  echo $(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" status --porcelain)
}
@@ -1,7 +1,7 @@
#!/usr/bin/env bash

_latest_release(){
  echo $(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag -l | tail -n 1)
  echo $(git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag -l --sort=v:refname | tail -n 1)
}

_fetch_recipe() {
@@ -22,15 +22,6 @@ _reset_recipe(){
  _fetch_recipe
}

_ensure_latest_version(){
  latestRelease=$(_latest_release)

  if [ ! $latestRelease = "$1" ]; then
    echo "expected latest recipe version of '$1', saw: $latestRelease"
    return 1
  fi
}

_ensure_catalogue(){
  if [[ ! -d "$ABRA_DIR/catalogue" ]]; then
    run git clone https://git.coopcloud.tech/toolshed/recipes-catalogue-json.git $ABRA_DIR/catalogue
@@ -28,8 +28,6 @@ teardown(){

# bats test_tags=slow
@test "install release candidate from script" {
  skip "current RC is brokenly specified in the installer script"

  run bash -c 'curl https://install.abra.coopcloud.tech | bash -s -- --rc'
  assert_success
@@ -41,6 +41,9 @@ teardown(){
  assert_success
  assert_output --partial 'foo'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run $ABRA recipe lint "$TEST_RECIPE"
  assert_failure
  assert_output --partial 'locally unstaged changes'
@@ -58,6 +61,9 @@ teardown(){
  assert_success
  assert_output --partial 'foo'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run $ABRA recipe lint "$TEST_RECIPE" --chaos
  assert_success
@@ -20,6 +20,7 @@ setup(){

teardown() {
  _reset_recipe
  _reset_tags
}

@test "validate recipe argument" {
@@ -31,8 +32,6 @@ teardown() {
}

@test "release patch bump" {
  _ensure_latest_version "0.3.0+1.21.0"

  run $ABRA recipe upgrade "$TEST_RECIPE" --no-input --patch
  assert_success

@@ -40,6 +39,12 @@ teardown() {
  assert_success
  assert_output --partial 'image: nginx:1.21.6'

  # NOTE(d1): ensure the latest tag is the one we expect
  _remove_tags
  run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag \
    -a "0.3.0+1.21.0" -m "fake: 0.3.0+1.21.0"
  assert_success

  run $ABRA recipe sync "$TEST_RECIPE" --no-input --patch
  assert_success
  assert_output --partial 'synced label'
@@ -58,8 +63,6 @@ teardown() {
}

@test "release minor bump" {
  _ensure_latest_version "0.3.0+1.21.0"

  run $ABRA recipe upgrade "$TEST_RECIPE" --no-input --minor
  assert_success

@@ -67,6 +70,12 @@ teardown() {
  assert_success
  assert_output --regexp 'image: nginx:1.2.*'

  # NOTE(d1): ensure the latest tag is the one we expect
  _remove_tags
  run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag \
    -a "0.3.0+1.21.0" -m "fake: 0.3.0+1.21.0"
  assert_success

  run $ABRA recipe sync "$TEST_RECIPE" --no-input --minor
  assert_success
  assert_output --partial 'synced label'
@@ -102,8 +111,6 @@ teardown() {
}

@test "release with next release note" {
  _ensure_latest_version "0.3.0+1.21.0"

  _mkfile "$ABRA_DIR/recipes/$TEST_RECIPE/release/next" "those are some release notes for the next release"

  run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" add release/next
@@ -40,6 +40,9 @@ teardown(){
  run $ABRA recipe sync "$TEST_RECIPE" --no-input --patch
  assert_success

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "M compose.yml ?? foo"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_success
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
@@ -58,8 +61,6 @@ teardown(){
}

@test "sync patch label bump" {
  _ensure_latest_version "0.3.0+1.21.0"

  run $ABRA recipe upgrade "$TEST_RECIPE" --no-input --patch
  assert_success

@@ -67,6 +68,12 @@ teardown(){
  assert_success
  assert_output --partial 'image: nginx:1.21.6'

  # NOTE(d1): ensure the latest tag is the one we expect
  _remove_tags
  run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag \
    -a "0.3.0+1.21.0" -m "fake: 0.3.0+1.21.0"
  assert_success

  run $ABRA recipe sync "$TEST_RECIPE" --no-input --patch
  assert_success

@@ -76,8 +83,6 @@ teardown(){
}

@test "sync minor label bump" {
  _ensure_latest_version "0.3.0+1.21.0"

  run $ABRA recipe upgrade "$TEST_RECIPE" --no-input --minor
  assert_success

@@ -85,6 +90,12 @@ teardown(){
  assert_success
  assert_output --regexp 'image: nginx:1.2.*'

  # NOTE(d1): ensure the latest tag is the one we expect
  _remove_tags
  run git -C "$ABRA_DIR/recipes/$TEST_RECIPE" tag \
    -a "0.3.0+1.21.0" -m "fake: 0.3.0+1.21.0"
  assert_success

  run $ABRA recipe sync "$TEST_RECIPE" --no-input --minor
  assert_success
@@ -54,6 +54,9 @@ teardown(){
  assert_failure
  assert_output --partial 'locally unstaged changes'

  assert_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_equal "$(_git_status)" "?? foo"

  run rm -rf "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
  assert_not_exists "$ABRA_DIR/recipes/$TEST_RECIPE/foo"
}
@@ -29,8 +29,6 @@ teardown(){

# bats test_tags=slow
@test "abra upgrade release candidate" {
  skip "TODO: RC publishing broke somehow, needs investigation"

  run $ABRA upgrade --rc
  assert_success
  assert_output --partial 'Public interest infrastructure'
vendor/github.com/moby/term/term_unix.go (2 changes, generated, vendored)
@@ -81,7 +81,7 @@ func setRawTerminal(fd uintptr) (*State, error) {
    return makeRaw(fd)
}

func setRawTerminalOutput(fd uintptr) (*State, error) {
func setRawTerminalOutput(uintptr) (*State, error) {
    return nil, nil
}
vendor/go.opentelemetry.io/proto/otlp/metrics/v1/metrics.pb.go (12 changes, generated, vendored)
@@ -298,7 +298,8 @@ type ResourceMetrics struct {
    // A list of metrics that originate from a resource.
    ScopeMetrics []*ScopeMetrics `protobuf:"bytes,2,rep,name=scope_metrics,json=scopeMetrics,proto3" json:"scope_metrics,omitempty"`
    // The Schema URL, if known. This is the identifier of the Schema that the resource data
    // is recorded in. To learn more about Schema URL see
    // is recorded in. Notably, the last part of the URL path is the version number of the
    // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
    // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
    // This schema_url applies to the data in the "resource" field. It does not apply
    // to the data in the "scope_metrics" field which have their own schema_url field.
@@ -371,7 +372,8 @@ type ScopeMetrics struct {
    // A list of metrics that originate from an instrumentation library.
    Metrics []*Metric `protobuf:"bytes,2,rep,name=metrics,proto3" json:"metrics,omitempty"`
    // The Schema URL, if known. This is the identifier of the Schema that the metric data
    // is recorded in. To learn more about Schema URL see
    // is recorded in. Notably, the last part of the URL path is the version number of the
    // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
    // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
    // This schema_url applies to all metrics in the "metrics" field.
    SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
@@ -1165,7 +1167,7 @@ type HistogramDataPoint struct {
    // events, and is assumed to be monotonic over the values of these events.
    // Negative events *can* be recorded, but sum should not be filled out when
    // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
    // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
    // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
    Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
    // bucket_counts is an optional field contains the count values of histogram
    // for each bucket.
@@ -1347,7 +1349,7 @@ type ExponentialHistogramDataPoint struct {
    // events, and is assumed to be monotonic over the values of these events.
    // Negative events *can* be recorded, but sum should not be filled out when
    // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
    // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#histogram
    // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#histogram
    Sum *float64 `protobuf:"fixed64,5,opt,name=sum,proto3,oneof" json:"sum,omitempty"`
    // scale describes the resolution of the histogram. Boundaries are
    // located at powers of the base, where:
@@ -1560,7 +1562,7 @@ type SummaryDataPoint struct {
    // events, and is assumed to be monotonic over the values of these events.
    // Negative events *can* be recorded, but sum should not be filled out when
    // doing so. This is specifically to enforce compatibility w/ OpenMetrics,
    // see: https://github.com/OpenObservability/OpenMetrics/blob/main/specification/OpenMetrics.md#summary
    // see: https://github.com/prometheus/OpenMetrics/blob/v1.0.0/specification/OpenMetrics.md#summary
    Sum float64 `protobuf:"fixed64,5,opt,name=sum,proto3" json:"sum,omitempty"`
    // (Optional) list of values at different quantiles of the distribution calculated
    // from the current snapshot. The quantiles must be strictly increasing.
vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go (6 changes, generated, vendored)
@@ -311,7 +311,8 @@ type ResourceSpans struct {
    // A list of ScopeSpans that originate from a resource.
    ScopeSpans []*ScopeSpans `protobuf:"bytes,2,rep,name=scope_spans,json=scopeSpans,proto3" json:"scope_spans,omitempty"`
    // The Schema URL, if known. This is the identifier of the Schema that the resource data
    // is recorded in. To learn more about Schema URL see
    // is recorded in. Notably, the last part of the URL path is the version number of the
    // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
    // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
    // This schema_url applies to the data in the "resource" field. It does not apply
    // to the data in the "scope_spans" field which have their own schema_url field.
@@ -384,7 +385,8 @@ type ScopeSpans struct {
    // A list of Spans that originate from an instrumentation scope.
    Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"`
    // The Schema URL, if known. This is the identifier of the Schema that the span data
    // is recorded in. To learn more about Schema URL see
    // is recorded in. Notably, the last part of the URL path is the version number of the
    // schema: http[s]://server[:port]/path/<version>. To learn more about Schema URL see
    // https://opentelemetry.io/docs/specs/otel/schemas/#schema-url
    // This schema_url applies to all spans and span events in the "spans" field.
    SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"`
vendor/golang.org/x/net/http2/config.go (2 changes, generated, vendored)
@@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
    return conf
}

// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
    conf := http2Config{

vendor/golang.org/x/net/http2/config_go124.go (2 changes, generated, vendored)
@@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
    fillNetHTTPConfig(conf, srv.HTTP2)
}

// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
    fillNetHTTPConfig(conf, tr.HTTP2)
}
vendor/golang.org/x/net/http2/transport.go (13 changes, generated, vendored)
@@ -375,6 +375,7 @@ type ClientConn struct {
    doNotReuse bool // whether conn is marked to not be reused for any future requests
    closing bool
    closed bool
    closedOnIdle bool // true if conn was closed for idleness
    seenSettings bool // true if we've seen a settings frame, false otherwise
    seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails
    wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
@@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {

    // If this connection has never been used for a request and is closed,
    // then let it take a request (which will fail).
    // If the conn was closed for idleness, we're racing the idle timer;
    // don't try to use the conn. (Issue #70515.)
    //
    // This avoids a situation where an error early in a connection's lifetime
    // goes unreported.
    if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed {
    if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle {
        st.canTakeNewRequest = true
    }

@@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() {
        return
    }
    cc.closed = true
    cc.closedOnIdle = true
    nextID := cc.nextStreamID
    // TODO: do clients send GOAWAY too? maybe? Just Close:
    cc.mu.Unlock()
@@ -2434,9 +2438,12 @@ func (rl *clientConnReadLoop) cleanup() {
    // This avoids a situation where new connections are constantly created,
    // added to the pool, fail, and are removed from the pool, without any error
    // being surfaced to the user.
    const unusedWaitTime = 5 * time.Second
    unusedWaitTime := 5 * time.Second
    if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
        unusedWaitTime = cc.idleTimeout
    }
    idleTime := cc.t.now().Sub(cc.lastActive)
    if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
    if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
        cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
            cc.t.connPool().MarkDead(cc)
        })
vendor/golang.org/x/sys/unix/syscall_dragonfly.go (12 changes, generated, vendored)
@@ -246,6 +246,18 @@ func Sendfile(outfd int, infd int, offset *int64, count int) (written int, err e
    return sendfile(outfd, infd, offset, count)
}

func Dup3(oldfd, newfd, flags int) error {
    if oldfd == newfd || flags&^O_CLOEXEC != 0 {
        return EINVAL
    }
    how := F_DUP2FD
    if flags&O_CLOEXEC != 0 {
        how = F_DUP2FD_CLOEXEC
    }
    _, err := fcntl(oldfd, how, newfd)
    return err
}

/*
 * Exposed directly
 */
11
vendor/golang.org/x/sys/windows/dll_windows.go
generated
vendored
11
vendor/golang.org/x/sys/windows/dll_windows.go
generated
vendored
@ -43,8 +43,8 @@ type DLL struct {
// LoadDLL loads DLL file into memory.
//
// Warning: using LoadDLL without an absolute path name is subject to
// DLL preloading attacks. To safely load a system DLL, use LazyDLL
// with System set to true, or use LoadLibraryEx directly.
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL],
// or use [LoadLibraryEx] directly.
func LoadDLL(name string) (dll *DLL, err error) {
namep, err := UTF16PtrFromString(name)
if err != nil {

@ -271,6 +271,9 @@ func (d *LazyDLL) NewProc(name string) *LazyProc {
}

// NewLazyDLL creates new LazyDLL associated with DLL file.
//
// Warning: using NewLazyDLL without an absolute path name is subject to
// DLL preloading attacks. To safely load a system DLL, use [NewLazySystemDLL].
func NewLazyDLL(name string) *LazyDLL {
return &LazyDLL{Name: name}
}

@ -410,7 +413,3 @@ func loadLibraryEx(name string, system bool) (*DLL, error) {
}
return &DLL{Name: name, Handle: h}, nil
}

type errString string

func (s errString) Error() string { return string(s) }
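To illustrate the pattern the revised warnings point to: [NewLazySystemDLL] only searches the Windows system directory, which defeats DLL preloading from the current directory. A minimal sketch assuming golang.org/x/sys/windows; kernel32.dll and GetTickCount64 are just convenient examples:

//go:build windows

package main

import (
	"fmt"

	"golang.org/x/sys/windows"
)

func main() {
	// Resolved from the system directory only; never from the process's cwd.
	kernel32 := windows.NewLazySystemDLL("kernel32.dll")
	getTickCount64 := kernel32.NewProc("GetTickCount64")

	ticks, _, _ := getTickCount64.Call() // milliseconds since boot
	fmt.Printf("uptime: %d ms\n", ticks)
}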
5  vendor/golang.org/x/tools/go/packages/golist.go  generated  vendored
@ -322,6 +322,7 @@ type jsonPackage struct {
ImportPath string
Dir string
Name string
Target string
Export string
GoFiles []string
CompiledGoFiles []string

@ -506,6 +507,7 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
Name: p.Name,
ID: p.ImportPath,
Dir: p.Dir,
Target: p.Target,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),

@ -811,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string {
if cfg.Mode&NeedEmbedPatterns != 0 {
addFields("EmbedPatterns")
}
if cfg.Mode&NeedTarget != 0 {
addFields("Target")
}
return "-json=" + strings.Join(fields, ",")
}
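The three hunks above plumb a new Target field end to end: the go list JSON gains Target, the driver response copies it, and jsonFlag requests it only when the NeedTarget load mode is set. A minimal client-side sketch assuming this vendored revision of golang.org/x/tools/go/packages; "fmt" is an arbitrary pattern:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	// NeedTarget asks the underlying `go list` for the install path
	// (the .a file for libraries, the executable for binaries).
	cfg := &packages.Config{Mode: packages.NeedName | packages.NeedTarget}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		fmt.Printf("%s -> %s\n", p.PkgPath, p.Target)
	}
}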
1  vendor/golang.org/x/tools/go/packages/loadmode_string.go  generated  vendored
@ -27,6 +27,7 @@ var modes = [...]struct {
{NeedModule, "NeedModule"},
{NeedEmbedFiles, "NeedEmbedFiles"},
{NeedEmbedPatterns, "NeedEmbedPatterns"},
{NeedTarget, "NeedTarget"},
}

func (mode LoadMode) String() string {
7  vendor/golang.org/x/tools/go/packages/packages.go  generated  vendored
@ -118,6 +118,9 @@ const (
// NeedEmbedPatterns adds EmbedPatterns.
NeedEmbedPatterns

// NeedTarget adds Target.
NeedTarget

// Be sure to update loadmode_string.go when adding new items!
)

@ -479,6 +482,10 @@ type Package struct {
// information for the package as provided by the build system.
ExportFile string

// Target is the absolute install path of the .a file, for libraries,
// and of the executable file, for binaries.
Target string

// Imports maps import paths appearing in the package's Go source files
// to corresponding loaded Packages.
Imports map[string]*Package
244  vendor/golang.org/x/tools/go/types/typeutil/map.go  generated  vendored
@ -2,30 +2,35 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package typeutil defines various utilities for types, such as Map,
// a mapping from types.Type to any values.
package typeutil // import "golang.org/x/tools/go/types/typeutil"
// Package typeutil defines various utilities for types, such as [Map],
// a hash table that maps [types.Type] to any value.
package typeutil

import (
"bytes"
"fmt"
"go/types"
"reflect"
"hash/maphash"
"unsafe"

"golang.org/x/tools/internal/typeparams"
)

// Map is a hash-table-based mapping from types (types.Type) to
// arbitrary any values. The concrete types that implement
// arbitrary values. The concrete types that implement
// the Type interface are pointers. Since they are not canonicalized,
// == cannot be used to check for equivalence, and thus we cannot
// simply use a Go map.
//
// Just as with map[K]V, a nil *Map is a valid empty map.
//
// Not thread-safe.
// Read-only map operations ([Map.At], [Map.Len], and so on) may
// safely be called concurrently.
//
// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420
// and 69559, if the latter proposals for a generic hash-map type and
// a types.Hash function are accepted.
type Map struct {
hasher Hasher // shared by many Maps
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
length int // number of map entries
}

@ -36,35 +41,17 @@ type entry struct {
value any
}

// SetHasher sets the hasher used by Map.
// SetHasher has no effect.
//
// All Hashers are functionally equivalent but contain internal state
// used to cache the results of hashing previously seen types.
//
// A single Hasher created by MakeHasher() may be shared among many
// Maps. This is recommended if the instances have many keys in
// common, as it will amortize the cost of hash computation.
//
// A Hasher may grow without bound as new types are seen. Even when a
// type is deleted from the map, the Hasher never shrinks, since other
// types in the map may reference the deleted type indirectly.
//
// Hashers are not thread-safe, and read-only operations such as
// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
// read-lock) is require around all Map operations if a shared
// hasher is accessed from multiple threads.
//
// If SetHasher is not called, the Map will create a private hasher at
// the first call to Insert.
func (m *Map) SetHasher(hasher Hasher) {
m.hasher = hasher
}
// It is a relic of an optimization that is no longer profitable. Do
// not use [Hasher], [MakeHasher], or [SetHasher] in new code.
func (m *Map) SetHasher(Hasher) {}

// Delete removes the entry with the given key, if any.
// It returns true if the entry was found.
func (m *Map) Delete(key types.Type) bool {
if m != nil && m.table != nil {
hash := m.hasher.Hash(key)
hash := hash(key)
bucket := m.table[hash]
for i, e := range bucket {
if e.key != nil && types.Identical(key, e.key) {

@ -83,7 +70,7 @@ func (m *Map) Delete(key types.Type) bool {
// The result is nil if the entry is not present.
func (m *Map) At(key types.Type) any {
if m != nil && m.table != nil {
for _, e := range m.table[m.hasher.Hash(key)] {
for _, e := range m.table[hash(key)] {
if e.key != nil && types.Identical(key, e.key) {
return e.value
}

@ -96,7 +83,7 @@ func (m *Map) At(key types.Type) any {
// and returns the previous entry, if any.
func (m *Map) Set(key types.Type, value any) (prev any) {
if m.table != nil {
hash := m.hasher.Hash(key)
hash := hash(key)
bucket := m.table[hash]
var hole *entry
for i, e := range bucket {

@ -115,10 +102,7 @@ func (m *Map) Set(key types.Type, value any) (prev any) {
m.table[hash] = append(bucket, entry{key, value})
}
} else {
if m.hasher.memo == nil {
m.hasher = MakeHasher()
}
hash := m.hasher.Hash(key)
hash := hash(key)
m.table = map[uint32][]entry{hash: {entry{key, value}}}
}

@ -195,53 +179,35 @@ func (m *Map) KeysString() string {
return m.toString(false)
}

////////////////////////////////////////////////////////////////////////
// Hasher
// -- Hasher --

// A Hasher maps each type to its hash value.
// For efficiency, a hasher uses memoization; thus its memory
// footprint grows monotonically over time.
// Hashers are not thread-safe.
// Hashers have reference semantics.
// Call MakeHasher to create a Hasher.
type Hasher struct {
memo map[types.Type]uint32

// ptrMap records pointer identity.
ptrMap map[any]uint32

// sigTParams holds type parameters from the signature being hashed.
// Signatures are considered identical modulo renaming of type parameters, so
// within the scope of a signature type the identity of the signature's type
// parameters is just their index.
//
// Since the language does not currently support referring to uninstantiated
// generic types or functions, and instantiated signatures do not have type
// parameter lists, we should never encounter a second non-empty type
// parameter list when hashing a generic signature.
sigTParams *types.TypeParamList
// hash returns the hash of type t.
// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted.
func hash(t types.Type) uint32 {
return theHasher.Hash(t)
}

// MakeHasher returns a new Hasher instance.
func MakeHasher() Hasher {
return Hasher{
memo: make(map[types.Type]uint32),
ptrMap: make(map[any]uint32),
sigTParams: nil,
}
}
// A Hasher provides a [Hasher.Hash] method to map a type to its hash value.
// Hashers are stateless, and all are equivalent.
type Hasher struct{}

var theHasher Hasher

// MakeHasher returns Hasher{}.
// Hashers are stateless; all are equivalent.
func MakeHasher() Hasher { return theHasher }

// Hash computes a hash value for the given type t such that
// Identical(t, t') => Hash(t) == Hash(t').
func (h Hasher) Hash(t types.Type) uint32 {
hash, ok := h.memo[t]
if !ok {
hash = h.hashFor(t)
h.memo[t] = hash
}
return hash
return hasher{inGenericSig: false}.hash(t)
}

// hasher holds the state of a single Hash traversal: whether we are
// inside the signature of a generic function; this is used to
// optimize [hasher.hashTypeParam].
type hasher struct{ inGenericSig bool }

// hashString computes the Fowler–Noll–Vo hash of s.
func hashString(s string) uint32 {
var h uint32

@ -252,21 +218,21 @@ func hashString(s string) uint32 {
return h
}

// hashFor computes the hash of t.
func (h Hasher) hashFor(t types.Type) uint32 {
// hash computes the hash of t.
func (h hasher) hash(t types.Type) uint32 {
// See Identical for rationale.
switch t := t.(type) {
case *types.Basic:
return uint32(t.Kind())

case *types.Alias:
return h.Hash(types.Unalias(t))
return h.hash(types.Unalias(t))

case *types.Array:
return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem())

case *types.Slice:
return 9049 + 2*h.Hash(t.Elem())
return 9049 + 2*h.hash(t.Elem())

case *types.Struct:
var hash uint32 = 9059

@ -277,12 +243,12 @@ func (h Hasher) hashFor(t types.Type) uint32 {
}
hash += hashString(t.Tag(i))
hash += hashString(f.Name()) // (ignore f.Pkg)
hash += h.Hash(f.Type())
hash += h.hash(f.Type())
}
return hash

case *types.Pointer:
return 9067 + 2*h.Hash(t.Elem())
return 9067 + 2*h.hash(t.Elem())

case *types.Signature:
var hash uint32 = 9091

@ -290,33 +256,11 @@ func (h Hasher) hashFor(t types.Type) uint32 {
hash *= 8863
}

// Use a separate hasher for types inside of the signature, where type
// parameter identity is modified to be (index, constraint). We must use a
// new memo for this hasher as type identity may be affected by this
// masking. For example, in func[T any](*T), the identity of *T depends on
// whether we are mapping the argument in isolation, or recursively as part
// of hashing the signature.
//
// We should never encounter a generic signature while hashing another
// generic signature, but defensively set sigTParams only if h.mask is
// unset.
tparams := t.TypeParams()
if h.sigTParams == nil && tparams.Len() != 0 {
h = Hasher{
// There may be something more efficient than discarding the existing
// memo, but it would require detecting whether types are 'tainted' by
// references to type parameters.
memo: make(map[types.Type]uint32),
// Re-using ptrMap ensures that pointer identity is preserved in this
// hasher.
ptrMap: h.ptrMap,
sigTParams: tparams,
}
}

for i := 0; i < tparams.Len(); i++ {
for i := range tparams.Len() {
h.inGenericSig = true
tparam := tparams.At(i)
hash += 7 * h.Hash(tparam.Constraint())
hash += 7 * h.hash(tparam.Constraint())
}

return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())

@ -350,17 +294,17 @@ func (h Hasher) hashFor(t types.Type) uint32 {
return hash

case *types.Map:
return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem())

case *types.Chan:
return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem())

case *types.Named:
hash := h.hashPtr(t.Obj())
hash := h.hashTypeName(t.Obj())
targs := t.TypeArgs()
for i := 0; i < targs.Len(); i++ {
targ := targs.At(i)
hash += 2 * h.Hash(targ)
hash += 2 * h.hash(targ)
}
return hash

@ -374,17 +318,17 @@ func (h Hasher) hashFor(t types.Type) uint32 {
panic(fmt.Sprintf("%T: %v", t, t))
}

func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
func (h hasher) hashTuple(tuple *types.Tuple) uint32 {
// See go/types.identicalTypes for rationale.
n := tuple.Len()
hash := 9137 + 2*uint32(n)
for i := 0; i < n; i++ {
hash += 3 * h.Hash(tuple.At(i).Type())
for i := range n {
hash += 3 * h.hash(tuple.At(i).Type())
}
return hash
}

func (h Hasher) hashUnion(t *types.Union) uint32 {
func (h hasher) hashUnion(t *types.Union) uint32 {
// Hash type restrictions.
terms, err := typeparams.UnionTermSet(t)
// if err != nil t has invalid type restrictions. Fall back on a non-zero

@ -395,11 +339,11 @@ func (h Hasher) hashUnion(t *types.Union) uint32 {
return h.hashTermSet(terms)
}

func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
func (h hasher) hashTermSet(terms []*types.Term) uint32 {
hash := 9157 + 2*uint32(len(terms))
for _, term := range terms {
// term order is not significant.
termHash := h.Hash(term.Type())
termHash := h.hash(term.Type())
if term.Tilde() {
termHash *= 9161
}

@ -408,36 +352,42 @@ func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
return hash
}

// hashTypeParam returns a hash of the type parameter t, with a hash value
// depending on whether t is contained in h.sigTParams.
//
// If h.sigTParams is set and contains t, then we are in the process of hashing
// a signature, and the hash value of t must depend only on t's index and
// constraint: signatures are considered identical modulo type parameter
// renaming. To avoid infinite recursion, we only hash the type parameter
// index, and rely on types.Identical to handle signatures where constraints
// are not identical.
//
// Otherwise the hash of t depends only on t's pointer identity.
func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
if h.sigTParams != nil {
i := t.Index()
if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
return 9173 + 3*uint32(i)
}
// hashTypeParam returns the hash of a type parameter.
func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
// Within the signature of a generic function, TypeParams are
// identical if they have the same index and constraint, so we
// hash them based on index.
//
// When we are outside a generic function, free TypeParams are
// identical iff they are the same object, so we can use a
// more discriminating hash consistent with object identity.
// This optimization saves [Map] about 4% when hashing all the
// types.Info.Types in the forward closure of net/http.
if !h.inGenericSig {
// Optimization: outside a generic function signature,
// use a more discriminating hash consistent with object identity.
return h.hashTypeName(t.Obj())
}
return h.hashPtr(t.Obj())
return 9173 + 3*uint32(t.Index())
}

// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
// pointers values are not dependent on the GC.
func (h Hasher) hashPtr(ptr any) uint32 {
if hash, ok := h.ptrMap[ptr]; ok {
return hash
}
hash := uint32(reflect.ValueOf(ptr).Pointer())
h.ptrMap[ptr] = hash
return hash
var theSeed = maphash.MakeSeed()

// hashTypeName hashes the pointer of tname.
func (hasher) hashTypeName(tname *types.TypeName) uint32 {
// Since types.Identical uses == to compare TypeNames,
// the Hash function uses maphash.Comparable.
// TODO(adonovan): or will, when it becomes available in go1.24.
// In the meantime we use the pointer's numeric value.
//
// hash := maphash.Comparable(theSeed, tname)
//
// (Another approach would be to hash the name and package
// path, and whether or not it is a package-level typename. It
// is rare for a package to define multiple local types with
// the same name.)
hash := uintptr(unsafe.Pointer(tname))
return uint32(hash ^ (hash >> 32))
}

// shallowHash computes a hash of t without looking at any of its

@ -454,7 +404,7 @@ func (h Hasher) hashPtr(ptr any) uint32 {
// include m itself; there is no mention of the named type X that
// might help us break the cycle.
// (See comment in go/types.identical, case *Interface, for more.)
func (h Hasher) shallowHash(t types.Type) uint32 {
func (h hasher) shallowHash(t types.Type) uint32 {
// t is the type of an interface method (Signature),
// its params or results (Tuples), or their immediate
// elements (mostly Slice, Pointer, Basic, Named),

@ -475,7 +425,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 {
case *types.Tuple:
n := t.Len()
hash := 9137 + 2*uint32(n)
for i := 0; i < n; i++ {
for i := range n {
hash += 53471161 * h.shallowHash(t.At(i).Type())
}
return hash

@ -508,10 +458,10 @@ func (h Hasher) shallowHash(t types.Type) uint32 {
return 9127

case *types.Named:
return h.hashPtr(t.Obj())
return h.hashTypeName(t.Obj())

case *types.TypeParam:
return h.hashPtr(t.Obj())
return h.hashTypeParam(t)
}
panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
}
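The rewrite above replaces the memoizing, stateful Hasher with a stateless one (a package-level hash function plus a tiny hasher struct that only tracks whether it is inside a generic signature), which is what makes read-only Map operations safe to call concurrently. The observable Map semantics are unchanged; a small sketch of why the type keys need Identical-based lookup rather than ==:

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// Two structurally identical but distinct types.Type values:
	// == reports them unequal, yet Map must treat them as one key.
	t1 := types.NewSlice(types.Typ[types.Int])
	t2 := types.NewSlice(types.Typ[types.Int])

	var m typeutil.Map // the zero value is a valid empty map
	m.Set(t1, "a slice of int")

	fmt.Println(t1 == t2) // false: distinct pointers
	fmt.Println(m.At(t2)) // "a slice of int": Identical(t1, t2) holds
}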
455  vendor/golang.org/x/tools/internal/gcimporter/exportdata.go  generated  vendored
@ -2,52 +2,183 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.

// This file implements FindExportData.
// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
// This file also additionally implements FindExportData for gcexportdata.NewReader.

package gcimporter

import (
"bufio"
"bytes"
"errors"
"fmt"
"go/build"
"io"
"strconv"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
)

func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
// See $GOROOT/include/ar.h.
hdr := make([]byte, 16+12+6+6+8+10+2)
_, err = io.ReadFull(r, hdr)
if err != nil {
return
}
// leave for debugging
if false {
fmt.Printf("header: %s", hdr)
}
s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
length, err := strconv.Atoi(s)
size = int64(length)
if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
err = fmt.Errorf("invalid archive header")
return
}
name = strings.TrimSpace(string(hdr[:16]))
return
}

// FindExportData positions the reader r at the beginning of the
// export data section of an underlying cmd/compile created archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function.
// The size result is the length of the export data in bytes.
// This returns the length of the export data in bytes.
//
// This function is needed by [gcexportdata.Read], which must
// accept inputs produced by the last two releases of cmd/compile,
// plus tip.
func FindExportData(r *bufio.Reader) (size int64, err error) {
arsize, err := FindPackageDefinition(r)
if err != nil {
return
}
size = int64(arsize)

objapi, headers, err := ReadObjectHeaders(r)
if err != nil {
return
}
size -= int64(len(objapi))
for _, h := range headers {
size -= int64(len(h))
}

// Check for the binary export data section header "$$B\n".
// TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
line, err := r.ReadSlice('\n')
if err != nil {
return
}
hdr := string(line)
if hdr != "$$B\n" {
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
size -= int64(len(hdr))

// For files with a binary export data header "$$B\n",
// these are always terminated by an end-of-section marker "\n$$\n".
// So the last bytes must always be this constant.
//
// The end-of-section marker is not a part of the export data itself.
// Do not include these in size.
//
// It would be nice to have a sanity check that the final bytes after
// the export data are indeed the end-of-section marker. The split
// of gcexportdata.NewReader and gcexportdata.Read makes checking this
// ugly so gcimporter gives up enforcing this. The compiler and go/types
// importer do enforce this, which seems good enough.
const endofsection = "\n$$\n"
size -= int64(len(endofsection))

if size < 0 {
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
return
}

return
}

// ReadUnified reads the contents of the unified export data from a reader r
// that contains the contents of a GC-created archive file.
//
// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
//
// Supported GC-created archive files have 4 layers of nesting:
//   - An archive file containing a package definition file.
//   - The package definition file contains headers followed by a data section.
//     Headers are lines (≤ 4kb) that do not start with "$$".
//   - The data section starts with "$$B\n" followed by export data followed
//     by an end of section marker "\n$$\n". (The section start "$$\n" is no
//     longer supported.)
//   - The export data starts with a format byte ('u') followed by the <data> in
//     the given format. (See ReadExportDataHeader for older formats.)
//
// Putting this together, the bytes in GC-created archive files are expected
// to look like the following.
// See cmd/internal/archive for more details on ar file headers.
//
//	| <!arch>\n             | ar file signature
//	| __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
//	| go object <...>\n     | objabi header
//	| <optional headers>\n  | other headers such as build id
//	| $$B\n                 | binary format marker
//	| u<data>\n             | unified export <data>
//	| $$\n                  | end-of-section marker
//	| [optional padding]    | padding byte (0x0A) if size is odd
//	| [ar file header]      | other ar files
//	| [ar file data]        |
func ReadUnified(r *bufio.Reader) (data []byte, err error) {
// We historically guaranteed headers at the default buffer size (4096) work.
// This ensures we can use ReadSlice throughout.
const minBufferSize = 4096
r = bufio.NewReaderSize(r, minBufferSize)

size, err := FindPackageDefinition(r)
if err != nil {
return
}
n := size

objapi, headers, err := ReadObjectHeaders(r)
if err != nil {
return
}
n -= len(objapi)
for _, h := range headers {
n -= len(h)
}

hdrlen, err := ReadExportDataHeader(r)
if err != nil {
return
}
n -= hdrlen

// size also includes the end of section marker. Remove that many bytes from the end.
const marker = "\n$$\n"
n -= len(marker)

if n < 0 {
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
return
}

// Read n bytes from buf.
data = make([]byte, n)
_, err = io.ReadFull(r, data)
if err != nil {
return
}

// Check for marker at the end.
var suffix [len(marker)]byte
_, err = io.ReadFull(r, suffix[:])
if err != nil {
return
}
if s := string(suffix[:]); s != marker {
err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
return
}

return
}

// FindPackageDefinition positions the reader r at the beginning of a package
// definition file ("__.PKGDEF") within a GC-created archive by reading
// from it, and returns the size of the package definition file in the archive.
//
// The reader must be positioned at the start of the archive file before calling
// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
//
// See cmd/internal/archive for details on the archive format.
func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
// Uses ReadSlice to limit risk of malformed inputs.

// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {

@ -61,56 +192,230 @@ func FindExportData(r *bufio.Reader) (size int64, err error) {
return
}

// Archive file. Scan to __.PKGDEF.
var name string
if name, size, err = readGopackHeader(r); err != nil {
return
}
arsize := size

// First entry should be __.PKGDEF.
if name != "__.PKGDEF" {
err = fmt.Errorf("go archive is missing __.PKGDEF")
return
}

// Read first line of __.PKGDEF data, so that line
// is once again the first line of the input.
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))

// Now at __.PKGDEF in archive or still at beginning of file.
// Either way, line should begin with "go object ".
if !strings.HasPrefix(string(line), "go object ") {
err = fmt.Errorf("not a Go object file")
return
}

// Skip over object headers to get to the export data section header "$$B\n".
// Object headers are lines that do not start with '$'.
for line[0] != '$' {
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
size -= int64(len(line))
}

// Check for the binary export data section header "$$B\n".
hdr := string(line)
if hdr != "$$B\n" {
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
// TODO(taking): Remove end-of-section marker "\n$$\n" from size.

if size < 0 {
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
// package export block should be first
size = readArchiveHeader(r, "__.PKGDEF")
if size <= 0 {
err = fmt.Errorf("not a package file")
return
}

return
}

// ReadObjectHeaders reads object headers from the reader. Object headers are
// lines that do not start with an end-of-section marker "$$". The first header
// is the objabi header. On success, the reader will be positioned at the beginning
// of the end-of-section marker.
//
// It returns an error if any header does not fit in r.Size() bytes.
func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
// line is a temporary buffer for headers.
// Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
var line []byte

// objapi header should be the first line
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
objapi = string(line)

// objapi header begins with "go object ".
if !strings.HasPrefix(objapi, "go object ") {
err = fmt.Errorf("not a go object file: %s", objapi)
return
}

// process remaining object header lines
for {
// check for an end of section marker "$$"
line, err = r.Peek(2)
if err != nil {
return
}
if string(line) == "$$" {
return // stop
}

// read next header
line, err = r.ReadSlice('\n')
if err != nil {
return
}
headers = append(headers, string(line))
}
}

// ReadExportDataHeader reads the export data header and format from r.
// It returns the number of bytes read, or an error if the format is no longer
// supported or it failed to read.
//
// The only currently supported format is binary export data in the
// unified export format.
func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
// Read export data header.
line, err := r.ReadSlice('\n')
if err != nil {
return
}

hdr := string(line)
switch hdr {
case "$$\n":
err = fmt.Errorf("old textual export format no longer supported (recompile package)")
return

case "$$B\n":
var format byte
format, err = r.ReadByte()
if err != nil {
return
}
// The unified export format starts with a 'u'.
switch format {
case 'u':
default:
// Older no longer supported export formats include:
// indexed export format which started with an 'i'; and
// the older binary export format which started with a 'c',
// 'd', or 'v' (from "version").
err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
return
}

default:
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}

n = len(hdr) + 1 // + 1 is for 'u'
return
}

// FindPkg returns the filename and unique package id for an import
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
//
// FindPkg is only used in tests within x/tools.
func FindPkg(path, srcDir string) (filename, id string, err error) {
// TODO(taking): Move internal/exportdata.FindPkg into its own file,
// and then this copy into a _test package.
if path == "" {
return "", "", errors.New("path is empty")
}

var noext string
switch {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
// Don't require the source files to be present.
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
var bp *build.Package
bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
if bp.Goroot && bp.Dir != "" {
filename, err = lookupGorootExport(bp.Dir)
if err == nil {
_, err = os.Stat(filename)
}
if err == nil {
return filename, bp.ImportPath, nil
}
}
goto notfound
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
}
id = bp.ImportPath

case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
noext = filepath.Join(srcDir, path)
id = noext

case filepath.IsAbs(path):
// for completeness only - go/build.Import
// does not support absolute imports
// "/x" -> "/x.ext", "/x"
noext = path
id = path
}

if false { // for debugging
if path != id {
fmt.Printf("%s -> %s\n", path, id)
}
}

// try extensions
for _, ext := range pkgExts {
filename = noext + ext
f, statErr := os.Stat(filename)
if statErr == nil && !f.IsDir() {
return filename, id, nil
}
if err == nil {
err = statErr
}
}

notfound:
if err == nil {
return "", path, fmt.Errorf("can't find import: %q", path)
}
return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
}

var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension

var exportMap sync.Map // package dir → func() (string, error)

// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
// in prior Go releases) for the package located in pkgDir.
//
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
//
// lookupGorootExport is only used in tests within x/tools.
func lookupGorootExport(pkgDir string) (string, error) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
err error
)
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
listOnce.Do(func() {
cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
var output []byte
output, err = cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
err = errors.New(string(ee.Stderr))
}
return
}

exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
return
}

exportPath = exports[0]
})

return exportPath, err
})
}

return f.(func() (string, error))()
}
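A sketch of the consumer path these helpers serve, through the public golang.org/x/tools/go/gcexportdata API; the deprecated Find is used only to keep the example short, and may return an empty filename under module mode:

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	filename, path := gcexportdata.Find("fmt", "")
	if filename == "" {
		log.Fatal("export data for fmt not found (expected under GOPATH-style builds)")
	}
	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	r, err := gcexportdata.NewReader(f) // seeks to the export data, as above
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "exports", pkg.Scope().Len(), "objects")
}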
179  vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go  generated  vendored
@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter"

import (
"bufio"
"bytes"
"fmt"
"go/build"
"go/token"
"go/types"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
)

const (

@ -45,127 +39,14 @@ const (
trace = false
)

var exportMap sync.Map // package dir → func() (string, bool)

// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
// in prior Go releases) for the package located in pkgDir.
//
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
func lookupGorootExport(pkgDir string) (string, bool) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
)
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
listOnce.Do(func() {
cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
var output []byte
output, err := cmd.Output()
if err != nil {
return
}

exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
return
}

exportPath = exports[0]
})

return exportPath, exportPath != ""
})
}

return f.(func() (string, bool))()
}

var pkgExts = [...]string{".a", ".o"}

// FindPkg returns the filename and unique package id for an import
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
// If no file was found, an empty filename is returned.
func FindPkg(path, srcDir string) (filename, id string) {
if path == "" {
return
}

var noext string
switch {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
// Don't require the source files to be present.
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
var ok bool
if bp.Goroot && bp.Dir != "" {
filename, ok = lookupGorootExport(bp.Dir)
}
if !ok {
id = path // make sure we have an id to print in error message
return
}
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
id = bp.ImportPath
}

case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
noext = filepath.Join(srcDir, path)
id = noext

case filepath.IsAbs(path):
// for completeness only - go/build.Import
// does not support absolute imports
// "/x" -> "/x.ext", "/x"
noext = path
id = path
}

if false { // for debugging
if path != id {
fmt.Printf("%s -> %s\n", path, id)
}
}

if filename != "" {
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}

// try extensions
for _, ext := range pkgExts {
filename = noext + ext
if f, err := os.Stat(filename); err == nil && !f.IsDir() {
return
}
}

filename = "" // not found
return
}

// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
//
// TODO(taking): Import is only used in tests. Move to gcimporter_test.
func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
// Import is only used in tests.
func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
var filename, id string
var id string
if lookup != nil {
// With custom lookup specified, assume that caller has
// converted path to a canonical import path for use in the map.

@ -184,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
rc = f
} else {
filename, id = FindPkg(path, srcDir)
var filename string
filename, id, err = FindPkg(path, srcDir)
if filename == "" {
if path == "unsafe" {
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %q", id)
return nil, err
}

// no need to re-import if the package was imported completely before

@ -212,54 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
defer rc.Close()

var size int64
buf := bufio.NewReader(rc)
if size, err = FindExportData(buf); err != nil {
return
}

var data []byte
data, err = io.ReadAll(buf)
data, err := ReadUnified(buf)
if err != nil {
err = fmt.Errorf("import %q: %v", path, err)
return
}
if len(data) == 0 {
return nil, fmt.Errorf("no data to load a package from for path %s", id)
}

// TODO(gri): allow clients of go/importer to provide a FileSet.
// Or, define a new standard go/types/gcexportdata package.
fset := token.NewFileSet()
// unified: emitted by cmd/compile since go1.20.
_, pkg, err = UImportData(fset, packages, data, id)

// Select appropriate importer.
switch data[0] {
case 'v', 'c', 'd':
// binary: emitted by cmd/compile till go1.10; obsolete.
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])

case 'i':
// indexed: emitted by cmd/compile till go1.19;
// now used only for serializing go/types.
// See https://github.com/golang/go/issues/69491.
_, pkg, err := IImportData(fset, packages, data[1:], id)
return pkg, err

case 'u':
// unified: emitted by cmd/compile since go1.20.
_, pkg, err := UImportData(fset, packages, data[1:size], id)
return pkg, err

default:
l := len(data)
if l > 10 {
l = 10
}
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
}
return
}

type byPath []*types.Package

func (a byPath) Len() int { return len(a) }
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
8  vendor/golang.org/x/tools/internal/gcimporter/iimport.go  generated  vendored
@ -5,8 +5,6 @@
// Indexed package import.
// See iexport.go for the export data format.

// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.

package gcimporter

import (

@ -1111,3 +1109,9 @@ func (r *importReader) byte() byte {
}
return x
}

type byPath []*types.Package

func (a byPath) Len() int { return len(a) }
func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
30  vendor/golang.org/x/tools/internal/gcimporter/support.go  generated  vendored  (new file)
@ -0,0 +1,30 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package gcimporter

import (
"bufio"
"io"
"strconv"
"strings"
)

// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader.
func readArchiveHeader(b *bufio.Reader, name string) int {
// architecture-independent object file output
const HeaderSize = 60

var buf [HeaderSize]byte
if _, err := io.ReadFull(b, buf[:]); err != nil {
return -1
}
aname := strings.Trim(string(buf[0:16]), " ")
if !strings.HasPrefix(aname, name) {
return -1
}
asize := strings.Trim(string(buf[48:58]), " ")
i, _ := strconv.Atoi(asize)
return i
}
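For reference, the 60-byte ar member header that readArchiveHeader parses puts the member name in bytes 0..16 and the decimal size in bytes 48..58 (the layout from cmd/internal/archive). A self-contained sketch with synthetic values:

package main

import (
	"fmt"
	"strconv"
	"strings"
)

func main() {
	// name(16) mtime(12) uid(6) gid(6) mode(8) size(10) terminator(2) = 60 bytes
	hdr := fmt.Sprintf("%-16s%-12s%-6s%-6s%-8s%-10d`\n",
		"__.PKGDEF", "0", "0", "0", "644", 1234)
	fmt.Println(len(hdr)) // 60

	name := strings.Trim(hdr[0:16], " ")
	size, _ := strconv.Atoi(strings.Trim(hdr[48:58], " "))
	fmt.Println(name, size) // __.PKGDEF 1234
}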
9  vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go  generated  vendored
@ -11,7 +11,6 @@ import (
"go/token"
"go/types"
"sort"
"strings"

"golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/pkgbits"

@ -71,7 +70,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []
}

s := string(data)
s = s[:strings.LastIndex(s, "\n$$\n")]
input := pkgbits.NewPkgDecoder(path, s)
pkg = readUnifiedPackage(fset, nil, imports, input)
return

@ -266,7 +264,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
func (r *reader) doPkg() *types.Package {
path := r.String()
switch path {
case "":
// cmd/compile emits path="main" for main packages because
// that's the linker symbol prefix it used; but we need
// the package's path as it would be reported by go list,
// hence "main" below.
// See test at go/packages.TestMainPackagePathInModeTypes.
case "", "main":
path = r.p.PkgPath()
case "builtin":
return nil // universe
219  vendor/golang.org/x/tools/internal/stdlib/manifest.go  generated  vendored
@ -268,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{
{"ErrTooLarge", Var, 0},
{"Fields", Func, 0},
{"FieldsFunc", Func, 0},
{"FieldsFuncSeq", Func, 24},
{"FieldsSeq", Func, 24},
{"HasPrefix", Func, 0},
{"HasSuffix", Func, 0},
{"Index", Func, 0},

@ -280,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{
{"LastIndexAny", Func, 0},
{"LastIndexByte", Func, 5},
{"LastIndexFunc", Func, 0},
{"Lines", Func, 24},
{"Map", Func, 0},
{"MinRead", Const, 0},
{"NewBuffer", Func, 0},

@ -293,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{
{"Split", Func, 0},
{"SplitAfter", Func, 0},
{"SplitAfterN", Func, 0},
{"SplitAfterSeq", Func, 24},
{"SplitN", Func, 0},
{"SplitSeq", Func, 24},
{"Title", Func, 0},
{"ToLower", Func, 0},
{"ToLowerSpecial", Func, 0},

@ -535,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{
{"NewCTR", Func, 0},
{"NewGCM", Func, 2},
{"NewGCMWithNonceSize", Func, 5},
{"NewGCMWithRandomNonce", Func, 24},
{"NewGCMWithTagSize", Func, 11},
{"NewOFB", Func, 0},
{"Stream", Type, 0},

@ -673,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{
{"Unmarshal", Func, 0},
{"UnmarshalCompressed", Func, 15},
},
"crypto/fips140": {
{"Enabled", Func, 24},
},
"crypto/hkdf": {
{"Expand", Func, 24},
{"Extract", Func, 24},
{"Key", Func, 24},
},
"crypto/hmac": {
{"Equal", Func, 1},
{"New", Func, 0},

@ -683,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{
{"Size", Const, 0},
{"Sum", Func, 2},
},
"crypto/mlkem": {
{"(*DecapsulationKey1024).Bytes", Method, 24},
{"(*DecapsulationKey1024).Decapsulate", Method, 24},
{"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
{"(*DecapsulationKey768).Bytes", Method, 24},
{"(*DecapsulationKey768).Decapsulate", Method, 24},
{"(*DecapsulationKey768).EncapsulationKey", Method, 24},
{"(*EncapsulationKey1024).Bytes", Method, 24},
{"(*EncapsulationKey1024).Encapsulate", Method, 24},
{"(*EncapsulationKey768).Bytes", Method, 24},
{"(*EncapsulationKey768).Encapsulate", Method, 24},
{"CiphertextSize1024", Const, 24},
{"CiphertextSize768", Const, 24},
{"DecapsulationKey1024", Type, 24},
{"DecapsulationKey768", Type, 24},
{"EncapsulationKey1024", Type, 24},
{"EncapsulationKey768", Type, 24},
{"EncapsulationKeySize1024", Const, 24},
{"EncapsulationKeySize768", Const, 24},
{"GenerateKey1024", Func, 24},
{"GenerateKey768", Func, 24},
{"NewDecapsulationKey1024", Func, 24},
{"NewDecapsulationKey768", Func, 24},
{"NewEncapsulationKey1024", Func, 24},
{"NewEncapsulationKey768", Func, 24},
{"SeedSize", Const, 24},
{"SharedKeySize", Const, 24},
},
"crypto/pbkdf2": {
{"Key", Func, 24},
},
"crypto/rand": {
{"Int", Func, 0},
{"Prime", Func, 0},
{"Read", Func, 0},
{"Reader", Var, 0},
{"Text", Func, 24},
},
"crypto/rc4": {
{"(*Cipher).Reset", Method, 0},

@ -766,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{
{"Sum224", Func, 2},
{"Sum256", Func, 2},
},
"crypto/sha3": {
{"(*SHA3).AppendBinary", Method, 24},
{"(*SHA3).BlockSize", Method, 24},
{"(*SHA3).MarshalBinary", Method, 24},
{"(*SHA3).Reset", Method, 24},
{"(*SHA3).Size", Method, 24},
{"(*SHA3).Sum", Method, 24},
{"(*SHA3).UnmarshalBinary", Method, 24},
{"(*SHA3).Write", Method, 24},
{"(*SHAKE).AppendBinary", Method, 24},
{"(*SHAKE).BlockSize", Method, 24},
{"(*SHAKE).MarshalBinary", Method, 24},
{"(*SHAKE).Read", Method, 24},
{"(*SHAKE).Reset", Method, 24},
{"(*SHAKE).UnmarshalBinary", Method, 24},
{"(*SHAKE).Write", Method, 24},
{"New224", Func, 24},
{"New256", Func, 24},
{"New384", Func, 24},
{"New512", Func, 24},
{"NewCSHAKE128", Func, 24},
{"NewCSHAKE256", Func, 24},
{"NewSHAKE128", Func, 24},
{"NewSHAKE256", Func, 24},
{"SHA3", Type, 24},
{"SHAKE", Type, 24},
{"Sum224", Func, 24},
{"Sum256", Func, 24},
{"Sum384", Func, 24},
{"Sum512", Func, 24},
{"SumSHAKE128", Func, 24},
{"SumSHAKE256", Func, 24},
},
"crypto/sha512": {
{"BlockSize", Const, 0},
{"New", Func, 0},

@ -788,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{
{"ConstantTimeEq", Func, 0},
{"ConstantTimeLessOrEq", Func, 2},
{"ConstantTimeSelect", Func, 0},
{"WithDataIndependentTiming", Func, 24},
{"XORBytes", Func, 20},
},
"crypto/tls": {

@ -864,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{
{"ClientHelloInfo", Type, 4},
{"ClientHelloInfo.CipherSuites", Field, 4},
{"ClientHelloInfo.Conn", Field, 8},
{"ClientHelloInfo.Extensions", Field, 24},
{"ClientHelloInfo.ServerName", Field, 4},
{"ClientHelloInfo.SignatureSchemes", Field, 8},
{"ClientHelloInfo.SupportedCurves", Field, 4},

@ -881,6 +962,7 @@ var PackageSymbols = map[string][]Symbol{
{"Config.CurvePreferences", Field, 3},
{"Config.DynamicRecordSizingDisabled", Field, 7},
{"Config.EncryptedClientHelloConfigList", Field, 23},
{"Config.EncryptedClientHelloKeys", Field, 24},
{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
{"Config.GetCertificate", Field, 4},
{"Config.GetClientCertificate", Field, 8},

@ -934,6 +1016,10 @@ var PackageSymbols = map[string][]Symbol{
{"ECHRejectionError", Type, 23},
{"ECHRejectionError.RetryConfigList", Field, 23},
{"Ed25519", Const, 13},
{"EncryptedClientHelloKey", Type, 24},
{"EncryptedClientHelloKey.Config", Field, 24},
{"EncryptedClientHelloKey.PrivateKey", Field, 24},
{"EncryptedClientHelloKey.SendAsRetry", Field, 24},
{"InsecureCipherSuites", Func, 14},
{"Listen", Func, 0},
{"LoadX509KeyPair", Func, 0},

@ -1032,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{
{"VersionTLS12", Const, 2},
{"VersionTLS13", Const, 12},
{"X25519", Const, 8},
{"X25519MLKEM768", Const, 24},
{"X509KeyPair", Func, 0},
},
"crypto/x509": {

@ -1056,6 +1143,8 @@ var PackageSymbols = map[string][]Symbol{
{"(ConstraintViolationError).Error", Method, 0},
{"(HostnameError).Error", Method, 0},
{"(InsecureAlgorithmError).Error", Method, 6},
{"(OID).AppendBinary", Method, 24},
{"(OID).AppendText", Method, 24},
{"(OID).Equal", Method, 22},
{"(OID).EqualASN1OID", Method, 22},
{"(OID).MarshalBinary", Method, 23},

@ -1084,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.Extensions", Field, 2},
{"Certificate.ExtraExtensions", Field, 2},
{"Certificate.IPAddresses", Field, 1},
{"Certificate.InhibitAnyPolicy", Field, 24},
{"Certificate.InhibitAnyPolicyZero", Field, 24},
{"Certificate.InhibitPolicyMapping", Field, 24},
{"Certificate.InhibitPolicyMappingZero", Field, 24},
{"Certificate.IsCA", Field, 0},
{"Certificate.Issuer", Field, 0},
{"Certificate.IssuingCertificateURL", Field, 2},

@ -1100,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.PermittedURIDomains", Field, 10},
{"Certificate.Policies", Field, 22},
{"Certificate.PolicyIdentifiers", Field, 0},
{"Certificate.PolicyMappings", Field, 24},
{"Certificate.PublicKey", Field, 0},
{"Certificate.PublicKeyAlgorithm", Field, 0},
{"Certificate.Raw", Field, 0},

@ -1107,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.RawSubject", Field, 0},
{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
{"Certificate.RawTBSCertificate", Field, 0},
{"Certificate.RequireExplicitPolicy", Field, 24},
{"Certificate.RequireExplicitPolicyZero", Field, 24},
{"Certificate.SerialNumber", Field, 0},
{"Certificate.Signature", Field, 0},
{"Certificate.SignatureAlgorithm", Field, 0},

@ -1198,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{
{"NameConstraintsWithoutSANs", Const, 10},
{"NameMismatch", Const, 8},
{"NewCertPool", Func, 0},
{"NoValidChains", Const, 24},
{"NotAuthorizedToSign", Const, 0},
{"OID", Type, 22},
{"OIDFromInts", Func, 22},

@ -1219,6 +1316,9 @@ var PackageSymbols = map[string][]Symbol{
{"ParsePKCS8PrivateKey", Func, 0},
{"ParsePKIXPublicKey", Func, 0},
{"ParseRevocationList", Func, 19},
{"PolicyMapping", Type, 24},
{"PolicyMapping.IssuerDomainPolicy", Field, 24},
{"PolicyMapping.SubjectDomainPolicy", Field, 24},
{"PublicKeyAlgorithm", Type, 0},
{"PureEd25519", Const, 13},
{"RSA", Const, 0},

@ -1265,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{
{"UnknownPublicKeyAlgorithm", Const, 0},
{"UnknownSignatureAlgorithm", Const, 0},
{"VerifyOptions", Type, 0},
{"VerifyOptions.CertificatePolicies", Field, 24},
{"VerifyOptions.CurrentTime", Field, 0},
{"VerifyOptions.DNSName", Field, 0},
{"VerifyOptions.Intermediates", Field, 0},

@ -1975,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*File).DynString", Method, 1},
{"(*File).DynValue", Method, 21},
{"(*File).DynamicSymbols", Method, 4},
{"(*File).DynamicVersionNeeds", Method, 24},
{"(*File).DynamicVersions", Method, 24},
{"(*File).ImportedLibraries", Method, 0},
{"(*File).ImportedSymbols", Method, 0},
{"(*File).Section", Method, 0},

@ -2240,6 +2343,19 @@ var PackageSymbols = map[string][]Symbol{
{"DynFlag", Type, 0},
{"DynFlag1", Type, 21},
{"DynTag", Type, 0},
{"DynamicVersion", Type, 24},
{"DynamicVersion.Deps", Field, 24},
{"DynamicVersion.Flags", Field, 24},
{"DynamicVersion.Index", Field, 24},
{"DynamicVersion.Name", Field, 24},
{"DynamicVersionDep", Type, 24},
{"DynamicVersionDep.Dep", Field, 24},
{"DynamicVersionDep.Flags", Field, 24},
{"DynamicVersionDep.Index", Field, 24},
{"DynamicVersionFlag", Type, 24},
{"DynamicVersionNeed", Type, 24},
{"DynamicVersionNeed.Name", Field, 24},
{"DynamicVersionNeed.Needs", Field, 24},
{"EI_ABIVERSION", Const, 0},
{"EI_CLASS", Const, 0},
{"EI_DATA", Const, 0},

@ -3726,8 +3842,19 @@ var PackageSymbols = map[string][]Symbol{
{"Symbol.Size", Field, 0},
{"Symbol.Value", Field, 0},
{"Symbol.Version", Field, 13},
{"Symbol.VersionIndex", Field, 24},
{"Symbol.VersionScope", Field, 24},
{"SymbolVersionScope", Type, 24},
{"Type", Type, 0},
{"VER_FLG_BASE", Const, 24},
{"VER_FLG_INFO", Const, 24},
{"VER_FLG_WEAK", Const, 24},
{"Version", Type, 0},
{"VersionScopeGlobal", Const, 24},
{"VersionScopeHidden", Const, 24},
{"VersionScopeLocal", Const, 24},
{"VersionScopeNone", Const, 24},
{"VersionScopeSpecific", Const, 24},
},
"debug/gosym": {
{"(*DecodingError).Error", Method, 0},

@ -4453,8 +4580,10 @@ var PackageSymbols = map[string][]Symbol{
{"FS", Type, 16},
},
"encoding": {
{"BinaryAppender", Type, 24},
{"BinaryMarshaler", Type, 2},
{"BinaryUnmarshaler", Type, 2},
{"TextAppender", Type, 24},
{"TextMarshaler", Type, 2},
{"TextUnmarshaler", Type, 2},
},

@ -5984,13 +6113,16 @@ var PackageSymbols = map[string][]Symbol{
{"(*Interface).Complete", Method, 5},
{"(*Interface).Embedded", Method, 5},
{"(*Interface).EmbeddedType", Method, 11},
{"(*Interface).EmbeddedTypes", Method, 24},
{"(*Interface).Empty", Method, 5},
{"(*Interface).ExplicitMethod", Method, 5},
{"(*Interface).ExplicitMethods", Method, 24},
{"(*Interface).IsComparable", Method, 18},
{"(*Interface).IsImplicit", Method, 18},
{"(*Interface).IsMethodSet", Method, 18},
{"(*Interface).MarkImplicit", Method, 18},
{"(*Interface).Method", Method, 5},
{"(*Interface).Methods", Method, 24},
|
||||
{"(*Interface).NumEmbeddeds", Method, 5},
|
||||
{"(*Interface).NumExplicitMethods", Method, 5},
|
||||
{"(*Interface).NumMethods", Method, 5},
|
||||
@ -6011,9 +6143,11 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*MethodSet).At", Method, 5},
|
||||
{"(*MethodSet).Len", Method, 5},
|
||||
{"(*MethodSet).Lookup", Method, 5},
|
||||
{"(*MethodSet).Methods", Method, 24},
|
||||
{"(*MethodSet).String", Method, 5},
|
||||
{"(*Named).AddMethod", Method, 5},
|
||||
{"(*Named).Method", Method, 5},
|
||||
{"(*Named).Methods", Method, 24},
|
||||
{"(*Named).NumMethods", Method, 5},
|
||||
{"(*Named).Obj", Method, 5},
|
||||
{"(*Named).Origin", Method, 18},
|
||||
@ -6054,6 +6188,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*Pointer).String", Method, 5},
|
||||
{"(*Pointer).Underlying", Method, 5},
|
||||
{"(*Scope).Child", Method, 5},
|
||||
{"(*Scope).Children", Method, 24},
|
||||
{"(*Scope).Contains", Method, 5},
|
||||
{"(*Scope).End", Method, 5},
|
||||
{"(*Scope).Innermost", Method, 5},
|
||||
@ -6089,6 +6224,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*StdSizes).Offsetsof", Method, 5},
|
||||
{"(*StdSizes).Sizeof", Method, 5},
|
||||
{"(*Struct).Field", Method, 5},
|
||||
{"(*Struct).Fields", Method, 24},
|
||||
{"(*Struct).NumFields", Method, 5},
|
||||
{"(*Struct).String", Method, 5},
|
||||
{"(*Struct).Tag", Method, 5},
|
||||
@ -6100,8 +6236,10 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*Tuple).Len", Method, 5},
|
||||
{"(*Tuple).String", Method, 5},
|
||||
{"(*Tuple).Underlying", Method, 5},
|
||||
{"(*Tuple).Variables", Method, 24},
|
||||
{"(*TypeList).At", Method, 18},
|
||||
{"(*TypeList).Len", Method, 18},
|
||||
{"(*TypeList).Types", Method, 24},
|
||||
{"(*TypeName).Exported", Method, 5},
|
||||
{"(*TypeName).Id", Method, 5},
|
||||
{"(*TypeName).IsAlias", Method, 9},
|
||||
@ -6119,9 +6257,11 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*TypeParam).Underlying", Method, 18},
|
||||
{"(*TypeParamList).At", Method, 18},
|
||||
{"(*TypeParamList).Len", Method, 18},
|
||||
{"(*TypeParamList).TypeParams", Method, 24},
|
||||
{"(*Union).Len", Method, 18},
|
||||
{"(*Union).String", Method, 18},
|
||||
{"(*Union).Term", Method, 18},
|
||||
{"(*Union).Terms", Method, 24},
|
||||
{"(*Union).Underlying", Method, 18},
|
||||
{"(*Var).Anonymous", Method, 5},
|
||||
{"(*Var).Embedded", Method, 11},
|
||||
@ -6392,10 +6532,12 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*Hash).WriteByte", Method, 14},
|
||||
{"(*Hash).WriteString", Method, 14},
|
||||
{"Bytes", Func, 19},
|
||||
{"Comparable", Func, 24},
|
||||
{"Hash", Type, 14},
|
||||
{"MakeSeed", Func, 14},
|
||||
{"Seed", Type, 14},
|
||||
{"String", Func, 19},
|
||||
{"WriteComparable", Func, 24},
|
||||
},
|
||||
"html": {
|
||||
{"EscapeString", Func, 0},
|
||||
@ -7082,6 +7224,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*JSONHandler).WithGroup", Method, 21},
|
||||
{"(*Level).UnmarshalJSON", Method, 21},
|
||||
{"(*Level).UnmarshalText", Method, 21},
|
||||
{"(*LevelVar).AppendText", Method, 24},
|
||||
{"(*LevelVar).Level", Method, 21},
|
||||
{"(*LevelVar).MarshalText", Method, 21},
|
||||
{"(*LevelVar).Set", Method, 21},
|
||||
@ -7110,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(Attr).Equal", Method, 21},
|
||||
{"(Attr).String", Method, 21},
|
||||
{"(Kind).String", Method, 21},
|
||||
{"(Level).AppendText", Method, 24},
|
||||
{"(Level).Level", Method, 21},
|
||||
{"(Level).MarshalJSON", Method, 21},
|
||||
{"(Level).MarshalText", Method, 21},
|
||||
@ -7140,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"Debug", Func, 21},
|
||||
{"DebugContext", Func, 21},
|
||||
{"Default", Func, 21},
|
||||
{"DiscardHandler", Var, 24},
|
||||
{"Duration", Func, 21},
|
||||
{"DurationValue", Func, 21},
|
||||
{"Error", Func, 21},
|
||||
@ -7375,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*Float).Acc", Method, 5},
|
||||
{"(*Float).Add", Method, 5},
|
||||
{"(*Float).Append", Method, 5},
|
||||
{"(*Float).AppendText", Method, 24},
|
||||
{"(*Float).Cmp", Method, 5},
|
||||
{"(*Float).Copy", Method, 5},
|
||||
{"(*Float).Float32", Method, 5},
|
||||
@ -7421,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*Int).And", Method, 0},
|
||||
{"(*Int).AndNot", Method, 0},
|
||||
{"(*Int).Append", Method, 6},
|
||||
{"(*Int).AppendText", Method, 24},
|
||||
{"(*Int).Binomial", Method, 0},
|
||||
{"(*Int).Bit", Method, 0},
|
||||
{"(*Int).BitLen", Method, 0},
|
||||
@ -7477,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*Int).Xor", Method, 0},
|
||||
{"(*Rat).Abs", Method, 0},
|
||||
{"(*Rat).Add", Method, 0},
|
||||
{"(*Rat).AppendText", Method, 24},
|
||||
{"(*Rat).Cmp", Method, 0},
|
||||
{"(*Rat).Denom", Method, 0},
|
||||
{"(*Rat).Float32", Method, 4},
|
||||
@ -7659,11 +7807,13 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"Zipf", Type, 0},
|
||||
},
|
||||
"math/rand/v2": {
|
||||
{"(*ChaCha8).AppendBinary", Method, 24},
|
||||
{"(*ChaCha8).MarshalBinary", Method, 22},
|
||||
{"(*ChaCha8).Read", Method, 23},
|
||||
{"(*ChaCha8).Seed", Method, 22},
|
||||
{"(*ChaCha8).Uint64", Method, 22},
|
||||
{"(*ChaCha8).UnmarshalBinary", Method, 22},
|
||||
{"(*PCG).AppendBinary", Method, 24},
|
||||
{"(*PCG).MarshalBinary", Method, 22},
|
||||
{"(*PCG).Seed", Method, 22},
|
||||
{"(*PCG).Uint64", Method, 22},
|
||||
@ -7931,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*UnixListener).SyscallConn", Method, 10},
|
||||
{"(Flags).String", Method, 0},
|
||||
{"(HardwareAddr).String", Method, 0},
|
||||
{"(IP).AppendText", Method, 24},
|
||||
{"(IP).DefaultMask", Method, 0},
|
||||
{"(IP).Equal", Method, 0},
|
||||
{"(IP).IsGlobalUnicast", Method, 0},
|
||||
@ -8131,6 +8282,9 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*MaxBytesError).Error", Method, 19},
|
||||
{"(*ProtocolError).Error", Method, 0},
|
||||
{"(*ProtocolError).Is", Method, 21},
|
||||
{"(*Protocols).SetHTTP1", Method, 24},
|
||||
{"(*Protocols).SetHTTP2", Method, 24},
|
||||
{"(*Protocols).SetUnencryptedHTTP2", Method, 24},
|
||||
{"(*Request).AddCookie", Method, 0},
|
||||
{"(*Request).BasicAuth", Method, 4},
|
||||
{"(*Request).Clone", Method, 13},
|
||||
@ -8190,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(Header).Values", Method, 14},
|
||||
{"(Header).Write", Method, 0},
|
||||
{"(Header).WriteSubset", Method, 0},
|
||||
{"(Protocols).HTTP1", Method, 24},
|
||||
{"(Protocols).HTTP2", Method, 24},
|
||||
{"(Protocols).String", Method, 24},
|
||||
{"(Protocols).UnencryptedHTTP2", Method, 24},
|
||||
{"AllowQuerySemicolons", Func, 17},
|
||||
{"CanonicalHeaderKey", Func, 0},
|
||||
{"Client", Type, 0},
|
||||
@ -8252,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"FileSystem", Type, 0},
|
||||
{"Flusher", Type, 0},
|
||||
{"Get", Func, 0},
|
||||
{"HTTP2Config", Type, 24},
|
||||
{"HTTP2Config.CountError", Field, 24},
|
||||
{"HTTP2Config.MaxConcurrentStreams", Field, 24},
|
||||
{"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
|
||||
{"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
|
||||
{"HTTP2Config.MaxReadFrameSize", Field, 24},
|
||||
{"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
|
||||
{"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
|
||||
{"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
|
||||
{"HTTP2Config.PingTimeout", Field, 24},
|
||||
{"HTTP2Config.SendPingTimeout", Field, 24},
|
||||
{"HTTP2Config.WriteByteTimeout", Field, 24},
|
||||
{"Handle", Func, 0},
|
||||
{"HandleFunc", Func, 0},
|
||||
{"Handler", Type, 0},
|
||||
@ -8292,6 +8462,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"PostForm", Func, 0},
|
||||
{"ProtocolError", Type, 0},
|
||||
{"ProtocolError.ErrorString", Field, 0},
|
||||
{"Protocols", Type, 24},
|
||||
{"ProxyFromEnvironment", Func, 0},
|
||||
{"ProxyURL", Func, 0},
|
||||
{"PushOptions", Type, 8},
|
||||
@ -8361,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"Server.ConnState", Field, 3},
|
||||
{"Server.DisableGeneralOptionsHandler", Field, 20},
|
||||
{"Server.ErrorLog", Field, 3},
|
||||
{"Server.HTTP2", Field, 24},
|
||||
{"Server.Handler", Field, 0},
|
||||
{"Server.IdleTimeout", Field, 8},
|
||||
{"Server.MaxHeaderBytes", Field, 0},
|
||||
{"Server.Protocols", Field, 24},
|
||||
{"Server.ReadHeaderTimeout", Field, 8},
|
||||
{"Server.ReadTimeout", Field, 0},
|
||||
{"Server.TLSConfig", Field, 0},
|
||||
@ -8453,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"Transport.ExpectContinueTimeout", Field, 6},
|
||||
{"Transport.ForceAttemptHTTP2", Field, 13},
|
||||
{"Transport.GetProxyConnectHeader", Field, 16},
|
||||
{"Transport.HTTP2", Field, 24},
|
||||
{"Transport.IdleConnTimeout", Field, 7},
|
||||
{"Transport.MaxConnsPerHost", Field, 11},
|
||||
{"Transport.MaxIdleConns", Field, 7},
|
||||
{"Transport.MaxIdleConnsPerHost", Field, 0},
|
||||
{"Transport.MaxResponseHeaderBytes", Field, 7},
|
||||
{"Transport.OnProxyConnectResponse", Field, 20},
|
||||
{"Transport.Protocols", Field, 24},
|
||||
{"Transport.Proxy", Field, 0},
|
||||
{"Transport.ProxyConnectHeader", Field, 8},
|
||||
{"Transport.ReadBufferSize", Field, 13},
|
||||
@ -8646,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*AddrPort).UnmarshalText", Method, 18},
|
||||
{"(*Prefix).UnmarshalBinary", Method, 18},
|
||||
{"(*Prefix).UnmarshalText", Method, 18},
|
||||
{"(Addr).AppendBinary", Method, 24},
|
||||
{"(Addr).AppendText", Method, 24},
|
||||
{"(Addr).AppendTo", Method, 18},
|
||||
{"(Addr).As16", Method, 18},
|
||||
{"(Addr).As4", Method, 18},
|
||||
@ -8676,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(Addr).WithZone", Method, 18},
|
||||
{"(Addr).Zone", Method, 18},
|
||||
{"(AddrPort).Addr", Method, 18},
|
||||
{"(AddrPort).AppendBinary", Method, 24},
|
||||
{"(AddrPort).AppendText", Method, 24},
|
||||
{"(AddrPort).AppendTo", Method, 18},
|
||||
{"(AddrPort).Compare", Method, 22},
|
||||
{"(AddrPort).IsValid", Method, 18},
|
||||
@ -8684,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(AddrPort).Port", Method, 18},
|
||||
{"(AddrPort).String", Method, 18},
|
||||
{"(Prefix).Addr", Method, 18},
|
||||
{"(Prefix).AppendBinary", Method, 24},
|
||||
{"(Prefix).AppendText", Method, 24},
|
||||
{"(Prefix).AppendTo", Method, 18},
|
||||
{"(Prefix).Bits", Method, 18},
|
||||
{"(Prefix).Contains", Method, 18},
|
||||
@ -8868,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*Error).Temporary", Method, 6},
|
||||
{"(*Error).Timeout", Method, 6},
|
||||
{"(*Error).Unwrap", Method, 13},
|
||||
{"(*URL).AppendBinary", Method, 24},
|
||||
{"(*URL).EscapedFragment", Method, 15},
|
||||
{"(*URL).EscapedPath", Method, 5},
|
||||
{"(*URL).Hostname", Method, 8},
|
||||
@ -8967,6 +9149,17 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*ProcessState).SysUsage", Method, 0},
|
||||
{"(*ProcessState).SystemTime", Method, 0},
|
||||
{"(*ProcessState).UserTime", Method, 0},
|
||||
{"(*Root).Close", Method, 24},
|
||||
{"(*Root).Create", Method, 24},
|
||||
{"(*Root).FS", Method, 24},
|
||||
{"(*Root).Lstat", Method, 24},
|
||||
{"(*Root).Mkdir", Method, 24},
|
||||
{"(*Root).Name", Method, 24},
|
||||
{"(*Root).Open", Method, 24},
|
||||
{"(*Root).OpenFile", Method, 24},
|
||||
{"(*Root).OpenRoot", Method, 24},
|
||||
{"(*Root).Remove", Method, 24},
|
||||
{"(*Root).Stat", Method, 24},
|
||||
{"(*SyscallError).Error", Method, 0},
|
||||
{"(*SyscallError).Timeout", Method, 10},
|
||||
{"(*SyscallError).Unwrap", Method, 13},
|
||||
@ -9060,6 +9253,8 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"O_WRONLY", Const, 0},
|
||||
{"Open", Func, 0},
|
||||
{"OpenFile", Func, 0},
|
||||
{"OpenInRoot", Func, 24},
|
||||
{"OpenRoot", Func, 24},
|
||||
{"PathError", Type, 0},
|
||||
{"PathError.Err", Field, 0},
|
||||
{"PathError.Op", Field, 0},
|
||||
@ -9081,6 +9276,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"Remove", Func, 0},
|
||||
{"RemoveAll", Func, 0},
|
||||
{"Rename", Func, 0},
|
||||
{"Root", Type, 24},
|
||||
{"SEEK_CUR", Const, 0},
|
||||
{"SEEK_END", Const, 0},
|
||||
{"SEEK_SET", Const, 0},
|
||||
@ -9422,6 +9618,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"Zero", Func, 0},
|
||||
},
|
||||
"regexp": {
|
||||
{"(*Regexp).AppendText", Method, 24},
|
||||
{"(*Regexp).Copy", Method, 6},
|
||||
{"(*Regexp).Expand", Method, 0},
|
||||
{"(*Regexp).ExpandString", Method, 0},
|
||||
@ -9602,6 +9799,8 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*StackRecord).Stack", Method, 0},
|
||||
{"(*TypeAssertionError).Error", Method, 0},
|
||||
{"(*TypeAssertionError).RuntimeError", Method, 0},
|
||||
{"(Cleanup).Stop", Method, 24},
|
||||
{"AddCleanup", Func, 24},
|
||||
{"BlockProfile", Func, 1},
|
||||
{"BlockProfileRecord", Type, 1},
|
||||
{"BlockProfileRecord.Count", Field, 1},
|
||||
@ -9612,6 +9811,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"Caller", Func, 0},
|
||||
{"Callers", Func, 0},
|
||||
{"CallersFrames", Func, 7},
|
||||
{"Cleanup", Type, 24},
|
||||
{"Compiler", Const, 0},
|
||||
{"Error", Type, 0},
|
||||
{"Frame", Type, 7},
|
||||
@ -9974,6 +10174,8 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"EqualFold", Func, 0},
|
||||
{"Fields", Func, 0},
|
||||
{"FieldsFunc", Func, 0},
|
||||
{"FieldsFuncSeq", Func, 24},
|
||||
{"FieldsSeq", Func, 24},
|
||||
{"HasPrefix", Func, 0},
|
||||
{"HasSuffix", Func, 0},
|
||||
{"Index", Func, 0},
|
||||
@ -9986,6 +10188,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"LastIndexAny", Func, 0},
|
||||
{"LastIndexByte", Func, 5},
|
||||
{"LastIndexFunc", Func, 0},
|
||||
{"Lines", Func, 24},
|
||||
{"Map", Func, 0},
|
||||
{"NewReader", Func, 0},
|
||||
{"NewReplacer", Func, 0},
|
||||
@ -9997,7 +10200,9 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"Split", Func, 0},
|
||||
{"SplitAfter", Func, 0},
|
||||
{"SplitAfterN", Func, 0},
|
||||
{"SplitAfterSeq", Func, 24},
|
||||
{"SplitN", Func, 0},
|
||||
{"SplitSeq", Func, 24},
|
||||
{"Title", Func, 0},
|
||||
{"ToLower", Func, 0},
|
||||
{"ToLowerSpecial", Func, 0},
|
||||
@ -16413,7 +16618,9 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"ValueOf", Func, 0},
|
||||
},
|
||||
"testing": {
|
||||
{"(*B).Chdir", Method, 24},
|
||||
{"(*B).Cleanup", Method, 14},
|
||||
{"(*B).Context", Method, 24},
|
||||
{"(*B).Elapsed", Method, 20},
|
||||
{"(*B).Error", Method, 0},
|
||||
{"(*B).Errorf", Method, 0},
|
||||
@ -16425,6 +16632,7 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*B).Helper", Method, 9},
|
||||
{"(*B).Log", Method, 0},
|
||||
{"(*B).Logf", Method, 0},
|
||||
{"(*B).Loop", Method, 24},
|
||||
{"(*B).Name", Method, 8},
|
||||
{"(*B).ReportAllocs", Method, 1},
|
||||
{"(*B).ReportMetric", Method, 13},
|
||||
@ -16442,7 +16650,9 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*B).StopTimer", Method, 0},
|
||||
{"(*B).TempDir", Method, 15},
|
||||
{"(*F).Add", Method, 18},
|
||||
{"(*F).Chdir", Method, 24},
|
||||
{"(*F).Cleanup", Method, 18},
|
||||
{"(*F).Context", Method, 24},
|
||||
{"(*F).Error", Method, 18},
|
||||
{"(*F).Errorf", Method, 18},
|
||||
{"(*F).Fail", Method, 18},
|
||||
@ -16463,7 +16673,9 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(*F).TempDir", Method, 18},
|
||||
{"(*M).Run", Method, 4},
|
||||
{"(*PB).Next", Method, 3},
|
||||
{"(*T).Chdir", Method, 24},
|
||||
{"(*T).Cleanup", Method, 14},
|
||||
{"(*T).Context", Method, 24},
|
||||
{"(*T).Deadline", Method, 15},
|
||||
{"(*T).Error", Method, 0},
|
||||
{"(*T).Errorf", Method, 0},
|
||||
@ -16954,7 +17166,9 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"(Time).Add", Method, 0},
|
||||
{"(Time).AddDate", Method, 0},
|
||||
{"(Time).After", Method, 0},
|
||||
{"(Time).AppendBinary", Method, 24},
|
||||
{"(Time).AppendFormat", Method, 5},
|
||||
{"(Time).AppendText", Method, 24},
|
||||
{"(Time).Before", Method, 0},
|
||||
{"(Time).Clock", Method, 0},
|
||||
{"(Time).Compare", Method, 20},
|
||||
@ -17428,4 +17642,9 @@ var PackageSymbols = map[string][]Symbol{
|
||||
{"String", Func, 0},
|
||||
{"StringData", Func, 0},
|
||||
},
|
||||
"weak": {
|
||||
{"(Pointer).Value", Method, 24},
|
||||
{"Make", Func, 24},
|
||||
{"Pointer", Type, 24},
|
||||
},
|
||||
}
|
||||
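For orientation: each manifest row is a {name, kind, minor-version} triple, where the final number is the go1.N release that introduced the symbol (24 = go1.24, 0 = present since go1.0). A self-contained sketch of how such a table can be queried; the type and field names below are illustrative assumptions, not the x/tools internals:

package main

import "fmt"

type Kind int

const (
	Const Kind = iota
	Var
	Func
	Type
	Field
	Method
)

// Symbol mirrors the shape of a manifest row; field names are assumptions.
type Symbol struct {
	Name    string
	Kind    Kind
	Version int // minor version: 24 means the symbol appeared in go1.24
}

func main() {
	manifest := map[string][]Symbol{
		"weak": {
			{"(Pointer).Value", Method, 24},
			{"Make", Func, 24},
			{"Pointer", Type, 24},
		},
	}
	// List everything the "weak" package gained in go1.24.
	for _, sym := range manifest["weak"] {
		if sym.Version == 24 {
			fmt.Printf("weak.%s (new in go1.%d)\n", sym.Name, sym.Version)
		}
	}
}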
72 vendor/golang.org/x/tools/internal/typeparams/common.go generated vendored
@@ -66,75 +66,3 @@ func IsTypeParam(t types.Type) bool {
_, ok := types.Unalias(t).(*types.TypeParam)
return ok
}

// GenericAssignableTo is a generalization of types.AssignableTo that
// implements the following rule for uninstantiated generic types:
//
// If V and T are generic named types, then V is considered assignable to T if,
// for every possible instantiation of V[A_1, ..., A_N], the instantiation
// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
//
// If T has structural constraints, they must be satisfied by V.
//
// For example, consider the following type declarations:
//
// type Interface[T any] interface {
// Accept(T)
// }
//
// type Container[T any] struct {
// Element T
// }
//
// func (c Container[T]) Accept(t T) { c.Element = t }
//
// In this case, GenericAssignableTo reports that instantiations of Container
// are assignable to the corresponding instantiation of Interface.
func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
V = types.Unalias(V)
T = types.Unalias(T)

// If V and T are not both named, or do not have matching non-empty type
// parameter lists, fall back on types.AssignableTo.

VN, Vnamed := V.(*types.Named)
TN, Tnamed := T.(*types.Named)
if !Vnamed || !Tnamed {
return types.AssignableTo(V, T)
}

vtparams := VN.TypeParams()
ttparams := TN.TypeParams()
if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
return types.AssignableTo(V, T)
}

// V and T have the same (non-zero) number of type params. Instantiate both
// with the type parameters of V. This must always succeed for V, and will
// succeed for T if and only if the type set of each type parameter of V is a
// subset of the type set of the corresponding type parameter of T, meaning
// that every instantiation of V corresponds to a valid instantiation of T.

// Minor optimization: ensure we share a context across the two
// instantiations below.
if ctxt == nil {
ctxt = types.NewContext()
}

var targs []types.Type
for i := 0; i < vtparams.Len(); i++ {
targs = append(targs, vtparams.At(i))
}

vinst, err := types.Instantiate(ctxt, V, targs, true)
if err != nil {
panic("type parameters should satisfy their own constraints")
}

tinst, err := types.Instantiate(ctxt, T, targs, true)
if err != nil {
return false
}

return types.AssignableTo(vinst, tinst)
}
46 vendor/golang.org/x/tools/internal/typesinternal/qualifier.go generated vendored Normal file
@@ -0,0 +1,46 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package typesinternal

import (
"go/ast"
"go/types"
"strconv"
)

// FileQualifier returns a [types.Qualifier] function that qualifies
// imported symbols appropriately based on the import environment of a given
// file.
// If the same package is imported multiple times, the last appearance is
// recorded.
func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
// Construct mapping of import paths to their defined names.
// It is only necessary to look at renaming imports.
imports := make(map[string]string)
for _, imp := range f.Imports {
if imp.Name != nil && imp.Name.Name != "_" {
path, _ := strconv.Unquote(imp.Path.Value)
imports[path] = imp.Name.Name
}
}

// Define qualifier to replace full package paths with names of the imports.
return func(p *types.Package) string {
if p == nil || p == pkg {
return ""
}

if name, ok := imports[p.Path()]; ok {
if name == "." {
return ""
} else {
return name
}
}

// If there is no local renaming, fall back to the package name.
return p.Name()
}
}
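FileQualifier is the qualifier that the rest of this change threads through TypeExpr and ZeroExpr. A hedged usage sketch: since internal/ packages cannot be imported from outside x/tools, the function is copied verbatim below and driven with a synthetic package; only the demo scaffolding (package paths, the Token type) is assumed.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"
	"strconv"
)

// fileQualifier is a copy of FileQualifier above (internal packages are
// not importable from outside the module).
func fileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
	imports := make(map[string]string)
	for _, imp := range f.Imports {
		if imp.Name != nil && imp.Name.Name != "_" {
			path, _ := strconv.Unquote(imp.Path.Value)
			imports[path] = imp.Name.Name
		}
	}
	return func(p *types.Package) string {
		if p == nil || p == pkg {
			return ""
		}
		if name, ok := imports[p.Path()]; ok {
			if name == "." {
				return ""
			}
			return name
		}
		return p.Name()
	}
}

func main() {
	src := "package demo\nimport fmtx \"fmt\"\nvar _ = fmtx.Sprint\n"
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg := types.NewPackage("example.com/demo", "demo")
	qual := fileQualifier(f, pkg)

	// A synthetic named type in package fmt; the renaming import wins.
	fmtPkg := types.NewPackage("fmt", "fmt")
	obj := types.NewTypeName(token.NoPos, fmtPkg, "Token", nil)
	named := types.NewNamed(obj, types.NewStruct(nil, nil), nil)
	fmt.Println(types.TypeString(named, qual)) // fmtx.Token
}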
2 vendor/golang.org/x/tools/internal/typesinternal/recv.go generated vendored
@@ -11,6 +11,8 @@ import (
// ReceiverNamed returns the named type (if any) associated with the
// type of recv, which may be of the form N or *N, or aliases thereof.
// It also reports whether a Pointer was present.
//
// The named result may be nil in ill-typed code.
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
t := recv.Type()
if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
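The hunk above cuts the function off mid-body. A standalone sketch of the same unwrapping logic; the tail (record the pointer, unwrap it, assert to *types.Named) is a plausible completion, not text from this diff:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

// receiverNamed mirrors ReceiverNamed: strip an optional pointer (and any
// aliases) from a receiver type to reach the *types.Named underneath.
func receiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
	t := recv.Type()
	if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
		isPtr = true
		t = ptr.Elem()
	}
	named, _ = types.Unalias(t).(*types.Named)
	return
}

func main() {
	pkg := types.NewPackage("example.com/p", "p")
	obj := types.NewTypeName(token.NoPos, pkg, "T", nil)
	named := types.NewNamed(obj, types.NewStruct(nil, nil), nil)
	recv := types.NewVar(token.NoPos, pkg, "t", types.NewPointer(named)) // receiver of form *T
	isPtr, n := receiverNamed(recv)
	fmt.Println(isPtr, n.Obj().Name()) // true T
}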
1 vendor/golang.org/x/tools/internal/typesinternal/types.go generated vendored
@@ -82,6 +82,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
type NamedOrAlias interface {
types.Type
Obj() *types.TypeName
// TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22.
}

// TypeParams is a light shim around t.TypeParams().
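The Obj() accessor is the whole point of this shim: *types.Named and *types.Alias can be handled uniformly without caring which flavor the caller holds. A small illustrative sketch using an equivalent anonymous interface (go1.22+ for types.NewAlias; the demo names are assumptions):

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

// declaredName accepts anything exposing Obj(), which both *types.Named and
// *types.Alias do. The anonymous interface stands in for NamedOrAlias.
func declaredName(t interface {
	types.Type
	Obj() *types.TypeName
}) string {
	return t.Obj().Name()
}

func main() {
	pkg := types.NewPackage("example.com/p", "p")
	named := types.NewNamed(types.NewTypeName(token.NoPos, pkg, "N", nil), types.NewStruct(nil, nil), nil)
	alias := types.NewAlias(types.NewTypeName(token.NoPos, pkg, "A", nil), named)
	fmt.Println(declaredName(named), declaredName(alias)) // N A
}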
258 vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go generated vendored
@@ -9,62 +9,97 @@ import (
"go/ast"
"go/token"
"go/types"
"strconv"
"strings"
)

// ZeroString returns the string representation of the "zero" value of the type t.
// This string can be used on the right-hand side of an assignment where the
// left-hand side has that explicit type.
// Exception: This does not apply to tuples. Their string representation is
// informational only and cannot be used in an assignment.
// ZeroString returns the string representation of the zero value for any type t.
// The boolean result indicates whether the type is or contains an invalid type
// or a non-basic (constraint) interface type.
//
// Even for invalid input types, ZeroString may return a partially correct
// string representation. The caller should use the returned isValid boolean
// to determine the validity of the expression.
//
// When assigning to a wider type (such as 'any'), it's the caller's
// responsibility to handle any necessary type conversions.
//
// This string can be used on the right-hand side of an assignment where the
// left-hand side has that explicit type.
// References to named types are qualified by an appropriate (optional)
// qualifier function.
// Exception: This does not apply to tuples. Their string representation is
// informational only and cannot be used in an assignment.
//
// See [ZeroExpr] for a variant that returns an [ast.Expr].
func ZeroString(t types.Type, qf types.Qualifier) string {
func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
switch t := t.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
return "false"
return "false", true
case t.Info()&types.IsNumeric != 0:
return "0"
return "0", true
case t.Info()&types.IsString != 0:
return `""`
return `""`, true
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
return "nil"
return "nil", true
case t.Kind() == types.Invalid:
return "invalid", false
default:
panic(fmt.Sprint("ZeroString for unexpected type:", t))
panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
}

case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return "nil"
case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
return "nil", true

case *types.Named, *types.Alias:
case *types.Interface:
if !t.IsMethodSet() {
return "invalid", false
}
return "nil", true

case *types.Named:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
return types.TypeString(t, qf) + "{}"
return types.TypeString(t, qual) + "{}", true
default:
return ZeroString(under, qf)
return ZeroString(under, qual)
}

case *types.Alias:
switch t.Underlying().(type) {
case *types.Struct, *types.Array:
return types.TypeString(t, qual) + "{}", true
default:
// A type parameter can have alias but alias type's underlying type
// can never be a type parameter.
// Use types.Unalias to preserve the info of type parameter instead
// of call Underlying() going right through and get the underlying
// type of the type parameter which is always an interface.
return ZeroString(types.Unalias(t), qual)
}

case *types.Array, *types.Struct:
return types.TypeString(t, qf) + "{}"
return types.TypeString(t, qual) + "{}", true

case *types.TypeParam:
// Assumes func new is not shadowed.
return "*new(" + types.TypeString(t, qf) + ")"
return "*new(" + types.TypeString(t, qual) + ")", true

case *types.Tuple:
// Tuples are not normal values.
// We are currently format as "(t[0], ..., t[n])". Could be something else.
isValid := true
components := make([]string, t.Len())
for i := 0; i < t.Len(); i++ {
components[i] = ZeroString(t.At(i).Type(), qf)
comp, ok := ZeroString(t.At(i).Type(), qual)

components[i] = comp
isValid = isValid && ok
}
return "(" + strings.Join(components, ", ") + ")"
return "(" + strings.Join(components, ", ") + ")", isValid

case *types.Union:
// Variables of these types cannot be created, so it makes
@@ -76,45 +111,72 @@ func ZeroString(t types.Type, qf types.Qualifier) string {
}
}

// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t.
// ZeroExpr is defined for types that are suitable for variables.
// It may panic for other types such as Tuple or Union.
// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
// The boolean result indicates whether the type is or contains an invalid type
// or a non-basic (constraint) interface type.
//
// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
// representation. The caller should use the returned isValid boolean to determine
// the validity of the expression.
//
// This function is designed for types suitable for variables and should not be
// used with Tuple or Union types.References to named types are qualified by an
// appropriate (optional) qualifier function.
//
// See [ZeroString] for a variant that returns a string.
func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
switch t := t.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
return &ast.Ident{Name: "false"}
return &ast.Ident{Name: "false"}, true
case t.Info()&types.IsNumeric != 0:
return &ast.BasicLit{Kind: token.INT, Value: "0"}
return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
case t.Info()&types.IsString != 0:
return &ast.BasicLit{Kind: token.STRING, Value: `""`}
return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
return ast.NewIdent("nil")
return ast.NewIdent("nil"), true
case t.Kind() == types.Invalid:
return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
default:
panic(fmt.Sprint("ZeroExpr for unexpected type:", t))
panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
}

case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
return ast.NewIdent("nil")
case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
return ast.NewIdent("nil"), true

case *types.Named, *types.Alias:
case *types.Interface:
if !t.IsMethodSet() {
return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
}
return ast.NewIdent("nil"), true

case *types.Named:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
return &ast.CompositeLit{
Type: TypeExpr(f, pkg, typ),
}
Type: TypeExpr(t, qual),
}, true
default:
return ZeroExpr(f, pkg, under)
return ZeroExpr(under, qual)
}

case *types.Alias:
switch t.Underlying().(type) {
case *types.Struct, *types.Array:
return &ast.CompositeLit{
Type: TypeExpr(t, qual),
}, true
default:
return ZeroExpr(types.Unalias(t), qual)
}

case *types.Array, *types.Struct:
return &ast.CompositeLit{
Type: TypeExpr(f, pkg, typ),
}
Type: TypeExpr(t, qual),
}, true

case *types.TypeParam:
return &ast.StarExpr{ // *new(T)
@@ -125,7 +187,7 @@ func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
ast.NewIdent(t.Obj().Name()),
},
},
}
}, true

case *types.Tuple:
// Unlike ZeroString, there is no ast.Expr can express tuple by
@@ -157,16 +219,14 @@ func IsZeroExpr(expr ast.Expr) bool {
}

// TypeExpr returns syntax for the specified type. References to named types
// from packages other than pkg are qualified by an appropriate package name, as
// defined by the import environment of file.
// are qualified by an appropriate (optional) qualifier function.
// It may panic for types such as Tuple or Union.
func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
switch t := typ.(type) {
func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
switch t := t.(type) {
case *types.Basic:
switch t.Kind() {
case types.UnsafePointer:
// TODO(hxjiang): replace the implementation with types.Qualifier.
return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
default:
return ast.NewIdent(t.Name())
}
@@ -174,7 +234,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
case *types.Pointer:
return &ast.UnaryExpr{
Op: token.MUL,
X: TypeExpr(f, pkg, t.Elem()),
X: TypeExpr(t.Elem(), qual),
}

case *types.Array:
@@ -183,18 +243,18 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
Kind: token.INT,
Value: fmt.Sprintf("%d", t.Len()),
},
Elt: TypeExpr(f, pkg, t.Elem()),
Elt: TypeExpr(t.Elem(), qual),
}

case *types.Slice:
return &ast.ArrayType{
Elt: TypeExpr(f, pkg, t.Elem()),
Elt: TypeExpr(t.Elem(), qual),
}

case *types.Map:
return &ast.MapType{
Key: TypeExpr(f, pkg, t.Key()),
Value: TypeExpr(f, pkg, t.Elem()),
Key: TypeExpr(t.Key(), qual),
Value: TypeExpr(t.Elem(), qual),
}

case *types.Chan:
@@ -204,14 +264,14 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
}
return &ast.ChanType{
Dir: dir,
Value: TypeExpr(f, pkg, t.Elem()),
Value: TypeExpr(t.Elem(), qual),
}

case *types.Signature:
var params []*ast.Field
for i := 0; i < t.Params().Len(); i++ {
params = append(params, &ast.Field{
Type: TypeExpr(f, pkg, t.Params().At(i).Type()),
Type: TypeExpr(t.Params().At(i).Type(), qual),
Names: []*ast.Ident{
{
Name: t.Params().At(i).Name(),
@@ -226,7 +286,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
var returns []*ast.Field
for i := 0; i < t.Results().Len(); i++ {
returns = append(returns, &ast.Field{
Type: TypeExpr(f, pkg, t.Results().At(i).Type()),
Type: TypeExpr(t.Results().At(i).Type(), qual),
})
}
return &ast.FuncType{
@@ -238,23 +298,9 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
},
}

case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam}
switch t.Obj().Pkg() {
case pkg, nil:
return ast.NewIdent(t.Obj().Name())
}
pkgName := t.Obj().Pkg().Name()

// TODO(hxjiang): replace the implementation with types.Qualifier.
// If the file already imports the package under another name, use that.
for _, cand := range f.Imports {
if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
if cand.Name != nil && cand.Name.Name != "" {
pkgName = cand.Name.Name
}
}
}
if pkgName == "." {
case *types.TypeParam:
pkgName := qual(t.Obj().Pkg())
if pkgName == "" || t.Obj().Pkg() == nil {
return ast.NewIdent(t.Obj().Name())
}
return &ast.SelectorExpr{
@@ -262,6 +308,36 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
Sel: ast.NewIdent(t.Obj().Name()),
}

// types.TypeParam also implements interface NamedOrAlias. To differentiate,
// case TypeParam need to be present before case NamedOrAlias.
// TODO(hxjiang): remove this comment once TypeArgs() is added to interface
// NamedOrAlias.
case NamedOrAlias:
var expr ast.Expr = ast.NewIdent(t.Obj().Name())
if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
expr = &ast.SelectorExpr{
X: ast.NewIdent(pkgName),
Sel: expr.(*ast.Ident),
}
}

// TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
// typesinternal.NamedOrAlias.
if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
var indices []ast.Expr
for i := range typeArgs.Len() {
indices = append(indices, TypeExpr(typeArgs.At(i), qual))
}
expr = &ast.IndexListExpr{
X: expr,
Indices: indices,
}
}
}

return expr

case *types.Struct:
return ast.NewIdent(t.String())

@@ -269,9 +345,43 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
return ast.NewIdent(t.String())

case *types.Union:
// TODO(hxjiang): handle the union through syntax (~A | ... | ~Z).
// Remove nil check when calling typesinternal.TypeExpr.
return nil
if t.Len() == 0 {
panic("Union type should have at least one term")
}
// Same as go/ast, the return expression will put last term in the
// Y field at topmost level of BinaryExpr.
// For union of type "float32 | float64 | int64", the structure looks
// similar to:
// {
// X: {
// X: float32,
// Op: |
// Y: float64,
// }
// Op: |,
// Y: int64,
// }
var union ast.Expr
for i := range t.Len() {
term := t.Term(i)
termExpr := TypeExpr(term.Type(), qual)
if term.Tilde() {
termExpr = &ast.UnaryExpr{
Op: token.TILDE,
X: termExpr,
}
}
if i == 0 {
union = termExpr
} else {
union = &ast.BinaryExpr{
X: union,
Op: token.OR,
Y: termExpr,
}
}
}
return union

case *types.Tuple:
panic("invalid input type types.Tuple")
24 vendor/google.golang.org/protobuf/internal/impl/message_opaque.go generated vendored
@@ -88,9 +88,7 @@ func opaqueInitHook(mi *MessageInfo) bool {
mi.oneofs = map[protoreflect.Name]*oneofInfo{}
for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
od := mi.Desc.Oneofs().Get(i)
if !od.IsSynthetic() {
mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter)
}
mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter)
}

mi.denseFields = make([]*fieldInfo, fds.Len()*2)
@@ -119,6 +117,26 @@
return true
}

func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
oi := &oneofInfo{oneofDesc: od}
if od.IsSynthetic() {
fd := od.Fields().Get(0)
index, _ := presenceIndex(mi.Desc, fd)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
}
if !mi.present(p, index) {
return 0
}
return od.Fields().Get(0).Number()
}
return oi
}
// Dispatch to non-opaque oneof implementation for non-synthetic oneofs.
return makeOneofInfo(od, si, x)
}

func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Map {
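The synthetic-oneof branch above reduces "which oneof member is set" to a presence-bit test, since a proto3 optional field is modelled as a one-field oneof. A toy stand-in illustrating the shape of that check; all names here are assumptions, not protobuf-go internals:

package main

import "fmt"

// whichField mirrors the oi.which closure above: report the field number
// only when the presence bit for the field is set; 0 means "nothing set".
func whichField(fieldNumber int32, presenceIndex int, present func(int) bool) int32 {
	if !present(presenceIndex) {
		return 0
	}
	return fieldNumber
}

func main() {
	bits := map[int]bool{7: true}
	present := func(i int) bool { return bits[i] }
	fmt.Println(whichField(3, 7, present)) // 3: presence bit set
	fmt.Println(whichField(3, 8, present)) // 0: unset
}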
2 vendor/google.golang.org/protobuf/internal/version/version.go generated vendored
@@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
Patch = 1
Patch = 2
PreRelease = ""
)

26 vendor/modules.txt vendored
@@ -442,7 +442,7 @@ github.com/moby/sys/user
# github.com/moby/sys/userns v0.1.0
## explicit; go 1.21
github.com/moby/sys/userns
# github.com/moby/term v0.5.0
# github.com/moby/term v0.5.2
## explicit; go 1.18
github.com/moby/term
github.com/moby/term/windows
@@ -626,15 +626,15 @@ go.opentelemetry.io/otel/sdk/metric/metricdata
go.opentelemetry.io/otel/trace
go.opentelemetry.io/otel/trace/embedded
go.opentelemetry.io/otel/trace/noop
# go.opentelemetry.io/proto/otlp v1.4.0
## explicit; go 1.22.7
# go.opentelemetry.io/proto/otlp v1.5.0
## explicit; go 1.22.0
go.opentelemetry.io/proto/otlp/collector/metrics/v1
go.opentelemetry.io/proto/otlp/collector/trace/v1
go.opentelemetry.io/proto/otlp/common/v1
go.opentelemetry.io/proto/otlp/metrics/v1
go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
# golang.org/x/crypto v0.31.0
# golang.org/x/crypto v0.32.0
## explicit; go 1.20
golang.org/x/crypto/argon2
golang.org/x/crypto/blake2b
@@ -652,7 +652,7 @@ golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/crypto/ssh/knownhosts
# golang.org/x/exp v0.0.0-20250103183323-7d7fa50e5329
# golang.org/x/exp v0.0.0-20250106191152-7588d65b2ba8
## explicit; go 1.22.0
golang.org/x/exp/slices
golang.org/x/exp/slog
@@ -661,7 +661,7 @@ golang.org/x/exp/slog/internal/buffer
# golang.org/x/mod v0.22.0
## explicit; go 1.22.0
golang.org/x/mod/semver
# golang.org/x/net v0.33.0
# golang.org/x/net v0.34.0
## explicit; go 1.18
golang.org/x/net/context
golang.org/x/net/http/httpguts
@@ -675,7 +675,7 @@ golang.org/x/net/trace
# golang.org/x/sync v0.10.0
## explicit; go 1.18
golang.org/x/sync/errgroup
# golang.org/x/sys v0.28.0
# golang.org/x/sys v0.29.0
## explicit; go 1.18
golang.org/x/sys/cpu
golang.org/x/sys/execabs
@@ -683,7 +683,7 @@ golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
# golang.org/x/term v0.27.0
# golang.org/x/term v0.28.0
## explicit; go 1.18
golang.org/x/term
# golang.org/x/text v0.21.0
@@ -699,10 +699,10 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
# golang.org/x/time v0.8.0
# golang.org/x/time v0.9.0
## explicit; go 1.18
golang.org/x/time/rate
# golang.org/x/tools v0.28.0
# golang.org/x/tools v0.29.0
## explicit; go 1.22.0
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/packages
@@ -721,10 +721,10 @@ golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
# google.golang.org/genproto/googleapis/api v0.0.0-20250102185135-69823020774d
# google.golang.org/genproto/googleapis/api v0.0.0-20250106144421-5f5ef82da422
## explicit; go 1.22
google.golang.org/genproto/googleapis/api/httpbody
# google.golang.org/genproto/googleapis/rpc v0.0.0-20250102185135-69823020774d
# google.golang.org/genproto/googleapis/rpc v0.0.0-20250106144421-5f5ef82da422
## explicit; go 1.22
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
@@ -789,7 +789,7 @@ google.golang.org/grpc/serviceconfig
google.golang.org/grpc/stats
google.golang.org/grpc/status
google.golang.org/grpc/tap
# google.golang.org/protobuf v1.36.1
# google.golang.org/protobuf v1.36.2
## explicit; go 1.21
google.golang.org/protobuf/encoding/protodelim
google.golang.org/protobuf/encoding/protojson