forked from toolshed/abra

refactor: urfave v3

2024-07-09 13:57:54 +02:00
parent 375e17a4a0
commit 1f8662cd95
336 changed files with 7332 additions and 25145 deletions

View File

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winterm

View File

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winterm

View File

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winterm

View File

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winterm

View File

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winterm

View File

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winterm

View File

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winterm

View File

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package winterm

View File

@ -259,7 +259,7 @@ func (e *Entity) EncryptPrivateKeys(passphrase []byte, config *packet.Config) er
var keysToEncrypt []*packet.PrivateKey
// Add entity private key to encrypt.
if e.PrivateKey != nil && !e.PrivateKey.Dummy() && !e.PrivateKey.Encrypted {
keysToEncrypt = append(keysToEncrypt, e.PrivateKey)
}
// Add subkeys to encrypt.
@ -284,7 +284,7 @@ func (e *Entity) DecryptPrivateKeys(passphrase []byte) error {
// Add subkeys to decrypt.
for _, sub := range e.Subkeys {
if sub.PrivateKey != nil && !sub.PrivateKey.Dummy() && sub.PrivateKey.Encrypted {
keysToDecrypt = append(keysToDecrypt, sub.PrivateKey)
}
}
return packet.DecryptPrivateKeys(keysToDecrypt, passphrase)
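For orientation, a minimal sketch of how the two methods touched in this hunk fit together, assuming the ProtonMail/go-crypto openpgp package this vendor tree carries (the entity creation and passphrase are illustrative):

package main

import (
	"fmt"

	"github.com/ProtonMail/go-crypto/openpgp"
)

func main() {
	// NewEntity builds a fresh key pair; nil selects the default config.
	e, err := openpgp.NewEntity("Alice", "", "alice@example.org", nil)
	if err != nil {
		panic(err)
	}
	// Lock the primary key and all subkeys with one passphrase...
	if err := e.EncryptPrivateKeys([]byte("s3cret"), nil); err != nil {
		panic(err)
	}
	fmt.Println("locked:", e.PrivateKey.Encrypted) // locked: true
	// ...and unlock them again; one s2k derivation is shared per passphrase.
	if err := e.DecryptPrivateKeys([]byte("s3cret")); err != nil {
		panic(err)
	}
	fmt.Println("locked:", e.PrivateKey.Encrypted) // locked: false
}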

View File

@ -458,7 +458,7 @@ func (pk *PrivateKey) Decrypt(passphrase []byte) error {
}
// DecryptPrivateKeys decrypts all encrypted keys with the given config and passphrase.
// Avoids recomputation of similar s2k key derivations.
func DecryptPrivateKeys(keys []*PrivateKey, passphrase []byte) error {
// Create a cache to avoid recomputation of key derivations for the same passphrase.
s2kCache := &s2k.Cache{}
@ -485,7 +485,7 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip
if len(key) != cipherFunction.KeySize() {
return errors.InvalidArgumentError("supplied encryption key has the wrong size")
}
priv := bytes.NewBuffer(nil)
err := pk.serializePrivateKey(priv)
if err != nil {
@ -497,7 +497,7 @@ func (pk *PrivateKey) encrypt(key []byte, params *s2k.Params, cipherFunction Cip
pk.s2k, err = pk.s2kParams.Function()
if err != nil {
return err
}
}
privateKeyBytes := priv.Bytes()
pk.sha1Checksum = true
@ -581,7 +581,7 @@ func (pk *PrivateKey) Encrypt(passphrase []byte) error {
S2KMode: s2k.IteratedSaltedS2K,
S2KCount: 65536,
Hash: crypto.SHA256,
},
DefaultCipher: CipherAES256,
}
return pk.EncryptWithConfig(passphrase, config)

View File

@ -87,10 +87,10 @@ func decodeCount(c uint8) int {
// encodeMemory converts the Argon2 "memory" in the range parallelism*8 to
// 2**31, inclusive, to an encoded memory. The return value is the
// octet that is actually stored in the GPG file. encodeMemory panics
// if it is not in the above range
// See OpenPGP crypto refresh Section 3.7.1.4.
func encodeMemory(memory uint32, parallelism uint8) uint8 {
if memory < (8*uint32(parallelism)) || memory > uint32(2147483648) {
if memory < (8 * uint32(parallelism)) || memory > uint32(2147483648) {
panic("Memory argument memory is outside the required range")
}
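Worked example of the range check above: with parallelism = 4 the lower bound is 4*8 = 32, so any memory value from 32 up to 2^31 (2147483648) kibibytes is accepted, and anything outside that range panics.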
@ -199,8 +199,8 @@ func Generate(rand io.Reader, c *Config) (*Params, error) {
}
params = &Params{
mode: SaltedS2K,
hashId: hashId,
}
} else { // Enforce IteratedSaltedS2K method otherwise
hashId, ok := algorithm.HashToHashId(c.hash())
@ -211,7 +211,7 @@ func Generate(rand io.Reader, c *Config) (*Params, error) {
c.S2KMode = IteratedSaltedS2K
}
params = &Params{
mode: IteratedSaltedS2K,
hashId: hashId,
countByte: c.EncodedCount(),
}
@ -306,12 +306,9 @@ func (params *Params) Dummy() bool {
func (params *Params) salt() []byte {
switch params.mode {
case SaltedS2K, IteratedSaltedS2K:
return params.saltBytes[:8]
case Argon2S2K:
return params.saltBytes[:Argon2SaltSize]
default:
return nil
case SaltedS2K, IteratedSaltedS2K: return params.saltBytes[:8]
case Argon2S2K: return params.saltBytes[:Argon2SaltSize]
default: return nil
}
}

View File

@ -5,7 +5,7 @@ package s2k
// the same parameters.
type Cache map[Params][]byte
// GetOrComputeDerivedKey tries to retrieve the key
// for the given s2k parameters from the cache.
// If there is no hit, it derives the key with the s2k function from the passphrase,
// updates the cache, and returns the key.

View File

@ -50,9 +50,9 @@ type Config struct {
type Argon2Config struct {
NumberOfPasses uint8
DegreeOfParallelism uint8
// The memory parameter for Argon2 specifies desired memory usage in kibibytes.
// For example memory=64*1024 sets the memory cost to ~64 MB.
Memory uint32
}
func (c *Config) Mode() Mode {
@ -115,7 +115,7 @@ func (c *Argon2Config) EncodedMemory() uint8 {
}
memory := c.Memory
lowerBound := uint32(c.Parallelism()) * 8
lowerBound := uint32(c.Parallelism())*8
upperBound := uint32(2147483648)
switch {

View File

@ -9,7 +9,7 @@
//
// For more detailed information about the algorithm used, see:
//
// # Effective Computation of Biased Quantiles over Data Streams
// Effective Computation of Biased Quantiles over Data Streams
//
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
package quantile

View File

@ -11,17 +11,17 @@ period for each retry attempt using a randomization function that grows exponent
NextBackOff() is calculated using the following formula:
randomized interval =
RetryInterval * (random value in range [1 - RandomizationFactor, 1 + RandomizationFactor])
In other words NextBackOff() will range between the randomization factor
percentage below and above the retry interval.
For example, given the following parameters:
RetryInterval = 2
RandomizationFactor = 0.5
Multiplier = 2
the actual backoff period used in the next retry attempt will range between 1 and 3 seconds,
multiplied by the exponential, that is, between 2 and 6 seconds.
@ -36,18 +36,18 @@ The elapsed time can be reset by calling Reset().
Example: Given the following default arguments, for 10 tries the sequence will be,
and assuming we go over the MaxElapsedTime on the 10th try:
Request # RetryInterval (seconds) Randomized Interval (seconds)
1 0.5 [0.25, 0.75]
2 0.75 [0.375, 1.125]
3 1.125 [0.562, 1.687]
4 1.687 [0.8435, 2.53]
5 2.53 [1.265, 3.795]
6 3.795 [1.897, 5.692]
7 5.692 [2.846, 8.538]
8 8.538 [4.269, 12.807]
9 12.807 [6.403, 19.210]
10 19.210 backoff.Stop
Note: Implementation is not thread-safe.
*/
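The randomized-interval formula and the table above are easy to reproduce; a standalone sketch using only the standard library (not the library's own implementation):

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// randomizedInterval applies: interval * (random in [1-factor, 1+factor]).
func randomizedInterval(current time.Duration, factor float64) time.Duration {
	delta := factor * float64(current)
	min := float64(current) - delta
	max := float64(current) + delta
	return time.Duration(min + rand.Float64()*(max-min))
}

func main() {
	interval := 2 * time.Second // RetryInterval = 2, RandomizationFactor = 0.5
	for i := 0; i < 3; i++ {
		fmt.Println(randomizedInterval(interval, 0.5)) // first pass: 1s..3s
		interval *= 2                                  // Multiplier = 2
	}
}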
@ -167,8 +167,7 @@ func (b *ExponentialBackOff) Reset() {
}
// NextBackOff calculates the next backoff interval using the formula:
//
// Randomized interval = RetryInterval * (1 ± RandomizationFactor)
func (b *ExponentialBackOff) NextBackOff() time.Duration {
// Make sure we have not gone over the maximum elapsed time.
elapsed := b.GetElapsedTime()
@ -201,8 +200,7 @@ func (b *ExponentialBackOff) incrementCurrentInterval() {
}
// Returns a random value from the following interval:
//
// [currentInterval - randomizationFactor * currentInterval, currentInterval + randomizationFactor * currentInterval].
func getRandomValueFromInterval(randomizationFactor, random float64, currentInterval time.Duration) time.Duration {
if randomizationFactor == 0 {
return currentInterval // make sure no randomness is used when randomizationFactor is 0.

View File

@ -3,13 +3,13 @@
//
// Grammar
//
// reference := name [ ":" tag ] [ "@" digest ]
// name := [domain '/'] path-component ['/' path-component]*
// domain := domain-component ['.' domain-component]* [':' port-number]
// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
// port-number := /[0-9]+/
// path-component := alpha-numeric [separator alpha-numeric]*
// alpha-numeric := /[a-z0-9]+/
// separator := /[_.]|__|[-]*/
//
// tag := /[\w][\w.-]{0,127}/
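To see the grammar in action, a short parsing sketch; the import path and helper names (ParseNormalizedNamed, Domain, Path, Tagged) are assumed to match the docker/distribution reference package vendored here:

package main

import (
	"fmt"

	"github.com/docker/distribution/reference"
)

func main() {
	// ParseNormalizedNamed also fills in the implicit docker.io/library/ parts.
	ref, err := reference.ParseNormalizedNamed("ubuntu:22.04")
	if err != nil {
		panic(err)
	}
	fmt.Println(reference.Domain(ref)) // docker.io
	fmt.Println(reference.Path(ref))   // library/ubuntu
	if tagged, ok := ref.(reference.Tagged); ok {
		fmt.Println(tagged.Tag()) // 22.04
	}
}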

View File

@ -1,4 +1,3 @@
//go:build !linux && (!386 || !amd64)
// +build !linux
// +build !386 !amd64

View File

@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
// +build linux
package keyctl

View File

@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
// +build linux
// Package keyctl is a Go interface to linux kernel keyrings (keyctl interface)

View File

@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
// +build linux
package keyctl

View File

@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build linux
// +build linux
package keyctl

View File

@ -131,25 +131,24 @@ type BICReplacementCandidate struct {
// BlobInfoCache records data useful for reusing blobs, or substituting equivalent ones, to avoid unnecessary blob copies.
//
// It records two kinds of data:
// - Sets of corresponding digest vs. uncompressed digest ("DiffID") pairs:
// One of the two digests is known to be uncompressed, and a single uncompressed digest may correspond to more than one compressed digest.
// This allows matching compressed layer blobs to existing local uncompressed layers (to avoid unnecessary download and decompression),
// or uncompressed layer blobs to existing remote compressed layers (to avoid unnecessary compression and upload).
// It is allowed to record an (uncompressed digest, the same uncompressed digest) correspondence, to express that the digest is known
// to be uncompressed (i.e. that a conversion from schema1 does not have to decompress the blob to compute a DiffID value).
// This mapping is primarily maintained in generic copy.Image code, but transports may want to contribute more data points if they independently
// compress/decompress blobs for their own purposes.
//
// - Known blob locations, managed by individual transports:
// The transports call RecordKnownLocation when encountering a blob that could possibly be reused (typically in GetBlob/PutBlob/TryReusingBlob),
// recording transport-specific information that allows the transport to reuse the blob in the future;
// then, TryReusingBlob implementations can call CandidateLocations to look up previously recorded blob locations that could be reused.
//
// Each transport defines its own “scopes” within which blob reuse is possible (e.g., in the docker/distribution case, blobs
// can be directly reused within a registry, or mounted across registries within a registry server.)
//
// None of the methods return an error indication: errors when neither reading from, nor writing to, the cache, should be fatal;
// users of the cache should just fall back to copying the blobs the usual way.

View File

@ -1,21 +0,0 @@
The MIT License (MIT)
Copyright (c) 2014 Brian Goff
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

View File

@ -1,16 +0,0 @@
package md2man
import (
"github.com/russross/blackfriday/v2"
)
// Render converts a markdown document into a roff formatted document.
func Render(doc []byte) []byte {
renderer := NewRoffRenderer()
return blackfriday.Run(doc,
[]blackfriday.Option{
blackfriday.WithRenderer(renderer),
blackfriday.WithExtensions(renderer.GetExtensions()),
}...)
}
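Even though this file is dropped from the vendor tree by the commit, Render's contract is small enough for a usage sketch (module path assumed to be cpuguy83/go-md2man/v2):

package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/v2/md2man"
)

func main() {
	md := []byte("# NAME\n\nhello \\- greet the world\n")
	// Render returns roff output suitable for man(1).
	fmt.Println(string(md2man.Render(md)))
}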

View File

@ -1,382 +0,0 @@
package md2man
import (
"bufio"
"bytes"
"fmt"
"io"
"os"
"strings"
"github.com/russross/blackfriday/v2"
)
// roffRenderer implements the blackfriday.Renderer interface for creating
// roff format (manpages) from markdown text
type roffRenderer struct {
extensions blackfriday.Extensions
listCounters []int
firstHeader bool
firstDD bool
listDepth int
}
const (
titleHeader = ".TH "
topLevelHeader = "\n\n.SH "
secondLevelHdr = "\n.SH "
otherHeader = "\n.SS "
crTag = "\n"
emphTag = "\\fI"
emphCloseTag = "\\fP"
strongTag = "\\fB"
strongCloseTag = "\\fP"
breakTag = "\n.br\n"
paraTag = "\n.PP\n"
hruleTag = "\n.ti 0\n\\l'\\n(.lu'\n"
linkTag = "\n\\[la]"
linkCloseTag = "\\[ra]"
codespanTag = "\\fB"
codespanCloseTag = "\\fR"
codeTag = "\n.EX\n"
codeCloseTag = ".EE\n" // Do not prepend a newline character since code blocks, by definition, include a newline already (or at least as how blackfriday gives us one).
quoteTag = "\n.PP\n.RS\n"
quoteCloseTag = "\n.RE\n"
listTag = "\n.RS\n"
listCloseTag = "\n.RE\n"
dtTag = "\n.TP\n"
dd2Tag = "\n"
tableStart = "\n.TS\nallbox;\n"
tableEnd = ".TE\n"
tableCellStart = "T{\n"
tableCellEnd = "\nT}\n"
tablePreprocessor = `'\" t`
)
// NewRoffRenderer creates a new blackfriday Renderer for generating roff documents
// from markdown
func NewRoffRenderer() *roffRenderer { // nolint: golint
var extensions blackfriday.Extensions
extensions |= blackfriday.NoIntraEmphasis
extensions |= blackfriday.Tables
extensions |= blackfriday.FencedCode
extensions |= blackfriday.SpaceHeadings
extensions |= blackfriday.Footnotes
extensions |= blackfriday.Titleblock
extensions |= blackfriday.DefinitionLists
return &roffRenderer{
extensions: extensions,
}
}
// GetExtensions returns the list of extensions used by this renderer implementation
func (r *roffRenderer) GetExtensions() blackfriday.Extensions {
return r.extensions
}
// RenderHeader handles outputting the header at document start
func (r *roffRenderer) RenderHeader(w io.Writer, ast *blackfriday.Node) {
// We need to walk the tree to check if there are any tables.
// If there are, we need to enable the roff table preprocessor.
ast.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
if node.Type == blackfriday.Table {
out(w, tablePreprocessor+"\n")
return blackfriday.Terminate
}
return blackfriday.GoToNext
})
// disable hyphenation
out(w, ".nh\n")
}
// RenderFooter handles outputting the footer at the document end; the roff
// renderer has no footer information
func (r *roffRenderer) RenderFooter(w io.Writer, ast *blackfriday.Node) {
}
// RenderNode is called for each node in a markdown document; based on the node
// type the equivalent roff output is sent to the writer
func (r *roffRenderer) RenderNode(w io.Writer, node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
walkAction := blackfriday.GoToNext
switch node.Type {
case blackfriday.Text:
escapeSpecialChars(w, node.Literal)
case blackfriday.Softbreak:
out(w, crTag)
case blackfriday.Hardbreak:
out(w, breakTag)
case blackfriday.Emph:
if entering {
out(w, emphTag)
} else {
out(w, emphCloseTag)
}
case blackfriday.Strong:
if entering {
out(w, strongTag)
} else {
out(w, strongCloseTag)
}
case blackfriday.Link:
// Don't render the link text for automatic links, because this
// will only duplicate the URL in the roff output.
// See https://daringfireball.net/projects/markdown/syntax#autolink
if !bytes.Equal(node.LinkData.Destination, node.FirstChild.Literal) {
out(w, string(node.FirstChild.Literal))
}
// Hyphens in a link must be escaped to avoid word-wrap in the rendered man page.
escapedLink := strings.ReplaceAll(string(node.LinkData.Destination), "-", "\\-")
out(w, linkTag+escapedLink+linkCloseTag)
walkAction = blackfriday.SkipChildren
case blackfriday.Image:
// ignore images
walkAction = blackfriday.SkipChildren
case blackfriday.Code:
out(w, codespanTag)
escapeSpecialChars(w, node.Literal)
out(w, codespanCloseTag)
case blackfriday.Document:
break
case blackfriday.Paragraph:
// roff .PP markers break lists
if r.listDepth > 0 {
return blackfriday.GoToNext
}
if entering {
out(w, paraTag)
} else {
out(w, crTag)
}
case blackfriday.BlockQuote:
if entering {
out(w, quoteTag)
} else {
out(w, quoteCloseTag)
}
case blackfriday.Heading:
r.handleHeading(w, node, entering)
case blackfriday.HorizontalRule:
out(w, hruleTag)
case blackfriday.List:
r.handleList(w, node, entering)
case blackfriday.Item:
r.handleItem(w, node, entering)
case blackfriday.CodeBlock:
out(w, codeTag)
escapeSpecialChars(w, node.Literal)
out(w, codeCloseTag)
case blackfriday.Table:
r.handleTable(w, node, entering)
case blackfriday.TableHead:
case blackfriday.TableBody:
case blackfriday.TableRow:
// no action as cell entries do all the nroff formatting
return blackfriday.GoToNext
case blackfriday.TableCell:
r.handleTableCell(w, node, entering)
case blackfriday.HTMLSpan:
// ignore other HTML tags
case blackfriday.HTMLBlock:
if bytes.HasPrefix(node.Literal, []byte("<!--")) {
break // ignore comments, no warning
}
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
default:
fmt.Fprintln(os.Stderr, "WARNING: go-md2man does not handle node type "+node.Type.String())
}
return walkAction
}
func (r *roffRenderer) handleHeading(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
switch node.Level {
case 1:
if !r.firstHeader {
out(w, titleHeader)
r.firstHeader = true
break
}
out(w, topLevelHeader)
case 2:
out(w, secondLevelHdr)
default:
out(w, otherHeader)
}
}
}
func (r *roffRenderer) handleList(w io.Writer, node *blackfriday.Node, entering bool) {
openTag := listTag
closeTag := listCloseTag
if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
// tags for definition lists handled within Item node
openTag = ""
closeTag = ""
}
if entering {
r.listDepth++
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
r.listCounters = append(r.listCounters, 1)
}
out(w, openTag)
} else {
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
r.listCounters = r.listCounters[:len(r.listCounters)-1]
}
out(w, closeTag)
r.listDepth--
}
}
func (r *roffRenderer) handleItem(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
if node.ListFlags&blackfriday.ListTypeOrdered != 0 {
out(w, fmt.Sprintf(".IP \"%3d.\" 5\n", r.listCounters[len(r.listCounters)-1]))
r.listCounters[len(r.listCounters)-1]++
} else if node.ListFlags&blackfriday.ListTypeTerm != 0 {
// DT (definition term): line just before DD (see below).
out(w, dtTag)
r.firstDD = true
} else if node.ListFlags&blackfriday.ListTypeDefinition != 0 {
// DD (definition description): line that starts with ": ".
//
// We have to distinguish between the first DD and the
// subsequent ones, as there should be no vertical
// whitespace between the DT and the first DD.
if r.firstDD {
r.firstDD = false
} else {
out(w, dd2Tag)
}
} else {
out(w, ".IP \\(bu 2\n")
}
} else {
out(w, "\n")
}
}
func (r *roffRenderer) handleTable(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
out(w, tableStart)
// call walker to count cells (and rows?) so format section can be produced
columns := countColumns(node)
out(w, strings.Repeat("l ", columns)+"\n")
out(w, strings.Repeat("l ", columns)+".\n")
} else {
out(w, tableEnd)
}
}
func (r *roffRenderer) handleTableCell(w io.Writer, node *blackfriday.Node, entering bool) {
if entering {
var start string
if node.Prev != nil && node.Prev.Type == blackfriday.TableCell {
start = "\t"
}
if node.IsHeader {
start += strongTag
} else if nodeLiteralSize(node) > 30 {
start += tableCellStart
}
out(w, start)
} else {
var end string
if node.IsHeader {
end = strongCloseTag
} else if nodeLiteralSize(node) > 30 {
end = tableCellEnd
}
if node.Next == nil && end != tableCellEnd {
// Last cell: need to carriage return if we are at the end of the
// header row and content isn't wrapped in a "tablecell"
end += crTag
}
out(w, end)
}
}
func nodeLiteralSize(node *blackfriday.Node) int {
total := 0
for n := node.FirstChild; n != nil; n = n.FirstChild {
total += len(n.Literal)
}
return total
}
// because roff format requires knowing the column count before outputting any table
// data we need to walk a table tree and count the columns
func countColumns(node *blackfriday.Node) int {
var columns int
node.Walk(func(node *blackfriday.Node, entering bool) blackfriday.WalkStatus {
switch node.Type {
case blackfriday.TableRow:
if !entering {
return blackfriday.Terminate
}
case blackfriday.TableCell:
if entering {
columns++
}
default:
}
return blackfriday.GoToNext
})
return columns
}
func out(w io.Writer, output string) {
io.WriteString(w, output) // nolint: errcheck
}
func escapeSpecialChars(w io.Writer, text []byte) {
scanner := bufio.NewScanner(bytes.NewReader(text))
// count the number of lines in the text
// we need to know this to avoid adding a newline after the last line
n := bytes.Count(text, []byte{'\n'})
idx := 0
for scanner.Scan() {
dt := scanner.Bytes()
if idx < n {
idx++
dt = append(dt, '\n')
}
escapeSpecialCharsLine(w, dt)
}
if err := scanner.Err(); err != nil {
panic(err)
}
}
func escapeSpecialCharsLine(w io.Writer, text []byte) {
for i := 0; i < len(text); i++ {
// escape initial apostrophe or period
if len(text) >= 1 && (text[0] == '\'' || text[0] == '.') {
out(w, "\\&")
}
// directly copy normal characters
org := i
for i < len(text) && text[i] != '\\' {
i++
}
if i > org {
w.Write(text[org:i]) // nolint: errcheck
}
// escape a character
if i >= len(text) {
break
}
w.Write([]byte{'\\', text[i]}) // nolint: errcheck
}
}

View File

@ -18,7 +18,6 @@
// tag is deprecated and thus should not be used.
// Go versions prior to 1.4 are disabled because they use a different layout
// for interfaces which makes the implementation of unsafeReflectValue more complex.
//go:build !js && !appengine && !safe && !disableunsafe && go1.4
// +build !js,!appengine,!safe,!disableunsafe,go1.4
package spew

View File

@ -16,7 +16,6 @@
// when the code is running on Google App Engine, compiled by GopherJS, or
// "-tags safe" is added to the go build command line. The "disableunsafe"
// tag is deprecated and thus should not be used.
//go:build js || appengine || safe || disableunsafe || !go1.4
// +build js appengine safe disableunsafe !go1.4
package spew

View File

@ -254,15 +254,15 @@ pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
- Pointers are dereferenced and followed
- Circular data structures are detected and handled properly
- Custom Stringer/error interfaces are optionally invoked, including
on unexported types
- Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
- Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by modifying the public members
of c. See ConfigState for options documentation.
@ -295,12 +295,12 @@ func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{})
// NewDefaultConfig returns a ConfigState with the following default settings.
//
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
// Indent: " "
// MaxDepth: 0
// DisableMethods: false
// DisablePointerMethods: false
// ContinueOnMethod: false
// SortKeys: false
func NewDefaultConfig() *ConfigState {
return &ConfigState{Indent: " "}
}

View File

@ -21,36 +21,35 @@ debugging.
A quick overview of the additional features spew provides over the built-in
printing facilities for Go data types are as follows:
- Pointers are dereferenced and followed
- Circular data structures are detected and handled properly
- Custom Stringer/error interfaces are optionally invoked, including
on unexported types
- Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
- Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output (only when using
Dump style)
There are two different approaches spew allows for dumping Go data structures:
- Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
- A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
* Dump style which prints with newlines, customizable indentation,
and additional debug information such as types and all pointer addresses
used to indirect to the final value
* A custom Formatter interface that integrates cleanly with the standard fmt
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
similar to the default %v while providing the additional functionality
outlined above and passing unsupported format verbs such as %x and %q
along to fmt
# Quick Start
Quick Start
This section demonstrates how to quickly get started with spew. See the
sections below for further details on formatting and configuration options.
To dump a variable with full newlines, indentation, type, and pointer
information use Dump, Fdump, or Sdump:
spew.Dump(myVar1, myVar2, ...)
spew.Fdump(someWriter, myVar1, myVar2, ...)
str := spew.Sdump(myVar1, myVar2, ...)
@ -59,13 +58,12 @@ Alternatively, if you would prefer to use format strings with a compacted inline
printing style, use the convenience wrappers Printf, Fprintf, etc with
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
%#+v (adds types and pointer addresses):
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
# Configuration Options
Configuration Options
Configuration of spew is handled by fields in the ConfigState type. For
convenience, all of the top-level functions use a global state available
@ -76,52 +74,51 @@ equivalent to the top-level functions. This allows concurrent configuration
options. See the ConfigState documentation for more details.
The following configuration options are available:
* Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
- Indent
String to use for each indentation level for Dump functions.
It is a single space by default. A popular alternative is "\t".
* MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
- MaxDepth
Maximum number of levels to descend into nested data structures.
There is no limit by default.
* DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
- DisableMethods
Disables invocation of error and Stringer interface methods.
Method invocation is enabled by default.
* DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
- DisablePointerMethods
Disables invocation of error and Stringer interface methods on types
which only accept pointer receivers from non-pointer variables.
Pointer method invocation is enabled by default.
* DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
- DisablePointerAddresses
DisablePointerAddresses specifies whether to disable the printing of
pointer addresses. This is useful when diffing data structures in tests.
* DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
- DisableCapacities
DisableCapacities specifies whether to disable the printing of
capacities for arrays, slices, maps and channels. This is useful when
diffing data structures in tests.
* ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
- ContinueOnMethod
Enables recursion into types after invoking error and Stringer interface
methods. Recursion after method invocation is disabled by default.
* SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
- SortKeys
Specifies map keys should be sorted before being printed. Use
this to have a more deterministic, diffable output. Note that
only native types (bool, int, uint, floats, uintptr and string)
and types which implement error or Stringer interfaces are
supported with other types sorted according to the
reflect.Value.String() output which guarantees display
stability. Natural map order is used by default.
* SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
- SpewKeys
Specifies that, as a last resort attempt, map keys should be
spewed to strings and sorted by those strings. This is only
considered if SortKeys is true.
# Dump Usage
Dump Usage
Simply call spew.Dump with a list of variables you want to dump:
@ -136,7 +133,7 @@ A third option is to call spew.Sdump to get the formatted output as a string:
str := spew.Sdump(myVar1, myVar2, ...)
# Sample Dump Output
Sample Dump Output
See the Dump example for details on the setup of the types and variables being
shown here.
@ -153,14 +150,13 @@ shown here.
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
command as shown.
([]uint8) (len=32 cap=32) {
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
00000020 31 32 |12|
}
# Custom Formatter
Custom Formatter
Spew provides a custom formatter that implements the fmt.Formatter interface
so that it integrates cleanly with standard fmt package printing functions. The
@ -174,7 +170,7 @@ standard fmt package for formatting. In addition, the custom formatter ignores
the width and precision arguments (however they will still work on the format
specifiers not handled by the custom formatter).
# Custom Formatter Usage
Custom Formatter Usage
The simplest way to make use of the spew custom formatter is to call one of the
convenience functions such as spew.Printf, spew.Println, or spew.Printf. The
@ -188,17 +184,15 @@ functions have syntax you are most likely already familiar with:
See the Index for the full list of convenience functions.
# Sample Formatter Output
Sample Formatter Output
Double pointer to a uint8:
%v: <**>5
%+v: <**>(0xf8400420d0->0xf8400420c8)5
%#v: (**uint8)5
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
Pointer to circular struct with a uint8 field and a pointer to itself:
%v: <*>{1 <*><shown>}
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
@ -207,7 +201,7 @@ Pointer to circular struct with a uint8 field and a pointer to itself:
See the Printf example for details on the setup of variables being shown
here.
# Errors
Errors
Since it is possible for custom Stringer/error interfaces to panic, spew
detects them and handles them internally by printing the panic information

View File

@ -488,15 +488,15 @@ pointer addresses used to indirect to the final value. It provides the
following features over the built-in printing facilities provided by the fmt
package:
- Pointers are dereferenced and followed
- Circular data structures are detected and handled properly
- Custom Stringer/error interfaces are optionally invoked, including
on unexported types
- Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
- Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
* Pointers are dereferenced and followed
* Circular data structures are detected and handled properly
* Custom Stringer/error interfaces are optionally invoked, including
on unexported types
* Custom types which only implement the Stringer/error interfaces via
a pointer receiver are optionally invoked when passing non-pointer
variables
* Byte arrays and slices are dumped like the hexdump -C command which
includes offsets, byte values in hex, and ASCII output
The configuration options are controlled by an exported package global,
spew.Config. See ConfigState for options documentation.

View File

@ -37,8 +37,8 @@ type Namespace struct {
// WithConstLabels returns a namespace with the provided set of labels merged
// with the existing constant labels on the namespace.
//
// Only metrics created with the returned namespace will get the new constant
// labels. The returned namespace must be registered separately.
func (n *Namespace) WithConstLabels(labels Labels) *Namespace {
n.mu.Lock()
ns := &Namespace{

View File

@ -74,13 +74,14 @@ import (
//
// The JSON null value unmarshals into an interface, map, pointer, or slice
// by setting that Go value to nil. Because null is often used in JSON to mean
// not present, unmarshaling a JSON null into any other Go type has no effect
// ``not present,'' unmarshaling a JSON null into any other Go type has no effect
// on the value and produces no error.
//
// When unmarshaling quoted strings, invalid UTF-8 or
// invalid UTF-16 surrogate pairs are not treated as an error.
// Instead, they are replaced by the Unicode replacement
// character U+FFFD.
//
func Unmarshal(data []byte, v interface{}) error {
// Check for well-formedness.
// Avoids filling out half a data structure

View File

@ -58,7 +58,6 @@ import (
// becomes a member of the object unless
// - the field's tag is "-", or
// - the field is empty and its tag specifies the "omitempty" option.
//
// The empty values are false, 0, any
// nil pointer or interface value, and any array, slice, map, or string of
// length zero. The object's default key string is the struct field name
@ -66,28 +65,28 @@ import (
// the struct field's tag value is the key name, followed by an optional comma
// and options. Examples:
//
// // Field is ignored by this package.
// Field int `json:"-"`
//
// // Field appears in JSON as key "myName".
// Field int `json:"myName"`
//
// // Field appears in JSON as key "myName" and
// // the field is omitted from the object if its value is empty,
// // as defined above.
// Field int `json:"myName,omitempty"`
//
// // Field appears in JSON as key "Field" (the default), but
// // the field is skipped if empty.
// // Note the leading comma.
// Field int `json:",omitempty"`
//
// The "string" option signals that a field is stored as JSON inside a
// JSON-encoded string. It applies only to fields of string, floating point,
// integer, or boolean types. This extra level of encoding is sometimes used
// when communicating with JavaScript programs:
//
// Int64String int64 `json:",string"`
//
// The key name will be used if it's a non-empty string consisting of
// only Unicode letters, digits, dollar signs, percent signs, hyphens,
@ -134,6 +133,7 @@ import (
// JSON cannot represent cyclic data structures and Marshal does not
// handle them. Passing cyclic structures to Marshal will result in
// an infinite recursion.
//
func Marshal(v interface{}) ([]byte, error) {
return marshal(v, false)
}
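The tag forms listed in this doc comment behave the same way in the standard library's encoding/json, which this vendored copy mirrors; a runnable sketch against the stdlib:

package main

import (
	"encoding/json"
	"fmt"
)

type Item struct {
	Secret string `json:"-"`                // never emitted
	Name   string `json:"myName"`           // renamed key
	Note   string `json:"myNote,omitempty"` // renamed, dropped when empty
	Count  int    `json:",omitempty"`       // default key, dropped when zero
	ID     int64  `json:",string"`          // emitted as a JSON string
}

func main() {
	b, err := json.Marshal(Item{Name: "a", ID: 7})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"myName":"a","ID":"7"}
}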

View File

@ -24,9 +24,8 @@ const (
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// - S maps to s and to U+017F 'ſ' Latin small letter long s
// - k maps to K and to U+212A 'K' Kelvin sign
//
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and

View File

@ -242,6 +242,7 @@ var _ Unmarshaler = (*RawMessage)(nil)
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//
type Token interface{}
const (

View File

@ -72,7 +72,7 @@ type IteratorWithKey interface {
//
// Essentially it is the same as IteratorWithIndex, but provides additional:
//
// # Prev() function to enable traversal in reverse
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
//
@ -105,7 +105,7 @@ type ReverseIteratorWithIndex interface {
//
// Essentially it is the same as IteratorWithKey, but provides additional:
//
// # Prev() function to enable traversal in reverse
// Prev() function to enable traversal in reverse
//
// Last() function to move the iterator to the last element.
type ReverseIteratorWithKey interface {

View File

@ -102,7 +102,7 @@ func (list *List) Values() []interface{} {
return newElements
}
// IndexOf returns index of provided element
//IndexOf returns index of provided element
func (list *List) IndexOf(value interface{}) int {
if list.size == 0 {
return -1

View File

@ -10,10 +10,9 @@ import "time"
// which will panic if a or b are not of the asserted type.
//
// Should return a number:
//
negative , if a < b
zero , if a == b
positive , if a > b
type Comparator func(a, b interface{}) int
// StringComparator provides a fast comparison on strings
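A custom Comparator following the contract above; pairing it with the gods treemap container is an illustrative choice, not something this diff prescribes:

package main

import (
	"fmt"

	"github.com/emirpasic/gods/maps/treemap"
)

// byLength orders strings by length: negative if a < b, zero if equal,
// positive if a > b, exactly as the Comparator doc requires.
func byLength(a, b interface{}) int {
	return len(a.(string)) - len(b.(string))
}

func main() {
	m := treemap.NewWith(byLength)
	m.Put("ccc", 3)
	m.Put("a", 1)
	m.Put("bb", 2)
	fmt.Println(m.Keys()) // [a bb ccc]
}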

View File

@ -1,6 +1,4 @@
//go:build go1.8
// +build go1.8
// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop

View File

@ -1,6 +1,4 @@
//go:build !go1.8
// +build !go1.8
// Code generated by "httpsnoop/codegen"; DO NOT EDIT.
package httpsnoop

View File

@ -347,9 +347,8 @@ const (
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// - S maps to s and to U+017F 'ſ' Latin small letter long s
// - k maps to K and to U+212A 'K' Kelvin sign
//
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and

View File

@ -64,12 +64,12 @@ func JSONToYAML(j []byte) ([]byte, error) {
// this method should be a no-op.
//
// Things YAML can do that are not supported by JSON:
// - In YAML you can have binary and null keys in your maps. These are invalid
// in JSON. (int and float keys are converted to strings.)
// - Binary data in YAML with the !!binary tag is not supported. If you want to
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
// * In YAML you can have binary and null keys in your maps. These are invalid
// in JSON. (int and float keys are converted to strings.)
// * Binary data in YAML with the !!binary tag is not supported. If you want to
// use binary data with this library, encode the data as base64 as usual but do
// not use the !!binary tag in your YAML. This will ensure the original base64
// encoded data makes it all the way through to the JSON.
func YAMLToJSON(y []byte) ([]byte, error) {
return yamlToJSON(y, nil)
}
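A usage sketch for the function documented above; the import path is assumed to be the ghodss/yaml module this vendored copy descends from:

package main

import (
	"fmt"

	"github.com/ghodss/yaml"
)

func main() {
	j, err := yaml.YAMLToJSON([]byte("name: abra\nreplicas: 2\n"))
	if err != nil {
		panic(err)
	}
	fmt.Println(string(j)) // {"name":"abra","replicas":2}
}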

vendor/github.com/go-git/gcfg/doc.go generated vendored
View File

@ -4,29 +4,29 @@
// This package is still a work in progress; see the sections below for planned
// changes.
//
// # Syntax
// Syntax
//
// The syntax is based on that used by git config:
// http://git-scm.com/docs/git-config#_syntax .
// There are some (planned) differences compared to the git config format:
// - improve data portability:
// - must be encoded in UTF-8 (for now) and must not contain the 0 byte
// - include and "path" type is not supported
// (path type may be implementable as a user-defined type)
// - internationalization
// - section and variable names can contain unicode letters, unicode digits
// (as defined in http://golang.org/ref/spec#Characters ) and hyphens
// (U+002D), starting with a unicode letter
// - disallow potentially ambiguous or misleading definitions:
// - `[sec.sub]` format is not allowed (deprecated in gitconfig)
// - `[sec ""]` is not allowed
// - use `[sec]` for section name "sec" and empty subsection name
// - (planned) within a single file, definitions must be contiguous for each:
// - section: '[secA]' -> '[secB]' -> '[secA]' is an error
// - subsection: '[sec "A"]' -> '[sec "B"]' -> '[sec "A"]' is an error
// - multivalued variable: 'multi=a' -> 'other=x' -> 'multi=b' is an error
//
// # Data structure
// Data structure
//
// The functions in this package read values into a user-defined struct.
// Each section corresponds to a struct field in the config struct, and each
@ -56,7 +56,7 @@
// or when a field is not of a suitable type (either a struct or a map with
// string keys and pointer-to-struct values).
//
// # Parsing of values
// Parsing of values
//
// The section structs in the config struct may contain single-valued or
// multi-valued variables. Variables of unnamed slice type (that is, a type
@ -98,17 +98,17 @@
// The types subpackage provides helpers for parsing "enum-like" and integer
// types.
//
// # Error handling
// Error handling
//
// There are 3 types of errors:
//
// - programmer errors / panics:
// - invalid configuration structure
// - data errors:
// - fatal errors:
// - invalid configuration syntax
// - warnings:
// - data that doesn't belong to any part of the config structure
//
// Programmer errors trigger panics. These should be fixed by the programmer
// before releasing code that uses gcfg.
@ -122,23 +122,24 @@
// filtered out programmatically. To ignore extra data warnings, wrap the
// gcfg.Read*Into invocation into a call to gcfg.FatalOnly.
//
// # TODO
// TODO
//
// The following is a list of changes under consideration:
// - documentation
// - self-contained syntax documentation
// - more practical examples
// - move TODOs to issue tracker (eventually)
// - syntax
// - reconsider valid escape sequences
// (gitconfig doesn't support \r in value, \t in subsection name, etc.)
// - reading / parsing gcfg files
// - define internal representation structure
// - support multiple inputs (readers, strings, files)
// - support declaring encoding (?)
// - support varying fields sets for subsections (?)
// - writing gcfg files
// - error handling
// - make error context accessible programmatically?
// - limit input size?
//
package gcfg // import "github.com/go-git/gcfg"
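A minimal end-to-end sketch of the struct mapping described above, using ReadStringInto from this package (the section and variable names are illustrative):

package main

import (
	"fmt"

	"github.com/go-git/gcfg"
)

type Config struct {
	Section struct {
		Name    string
		Enabled bool
	}
}

func main() {
	var cfg Config
	ini := "[section]\nname=value\nenabled=true\n"
	if err := gcfg.ReadStringInto(&cfg, ini); err != nil {
		panic(err)
	}
	fmt.Println(cfg.Section.Name, cfg.Section.Enabled) // value true
}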

View File

@ -8,9 +8,10 @@ import (
// fatal errors. That is, errors (warnings) indicating data for unknown
// sections / variables is ignored. Example invocation:
//
// err := gcfg.FatalOnly(gcfg.ReadFileInto(&cfg, configFile))
// if err != nil {
// ...
//
func FatalOnly(err error) error {
return warnings.FatalOnly(err)
}

View File

@ -18,6 +18,7 @@ import (
// The position Pos, if valid, points to the beginning of
// the offending token, and the error condition is described
// by Msg.
//
type Error struct {
Pos token.Position
Msg string
@ -35,6 +36,7 @@ func (e Error) Error() string {
// ErrorList is a list of *Errors.
// The zero value for an ErrorList is an empty ErrorList ready to use.
//
type ErrorList []*Error
// Add adds an Error with given position and error message to an ErrorList.
@ -64,6 +66,7 @@ func (p ErrorList) Less(i, j int) bool {
// Sort sorts an ErrorList. *Error entries are sorted by position,
// other errors are sorted by error message, and before any *Error
// entry.
//
func (p ErrorList) Sort() {
sort.Sort(p)
}
@ -106,6 +109,7 @@ func (p ErrorList) Err() error {
// PrintError is a utility function that prints a list of errors to w,
// one error per line, if the err parameter is an ErrorList. Otherwise
// it prints the err string.
//
func PrintError(w io.Writer, err error) {
if list, ok := err.(ErrorList); ok {
for _, e := range list {

View File

@ -216,7 +216,7 @@ func newValue(c *warnings.Collector, sect string, vCfg reflect.Value,
}
func set(c *warnings.Collector, cfg interface{}, sect, sub, name string,
value string, blankValue bool, subsectPass bool) error {
//
vPCfg := reflect.ValueOf(cfg)
if vPCfg.Kind() != reflect.Ptr || vPCfg.Elem().Kind() != reflect.Struct {

View File

@ -18,6 +18,7 @@ import (
// Position describes an arbitrary source position
// including the file, line, and column location.
// A Position is valid if the line number is > 0.
//
type Position struct {
Filename string // filename, if any
Offset int // offset, starting at 0
@ -34,6 +35,7 @@ func (pos *Position) IsValid() bool { return pos.Line > 0 }
// line:column valid position without file name
// file invalid position with file name
// - invalid position without file name
//
func (pos Position) String() string {
s := pos.Filename
if pos.IsValid() {
@ -67,12 +69,14 @@ func (pos Position) String() string {
// equivalent to comparing the respective source file offsets. If p and q
// are in different files, p < q is true if the file implied by p was added
// to the respective file set before the file implied by q.
//
type Pos int
// The zero value for Pos is NoPos; there is no file and line information
// associated with it, and NoPos().IsValid() is false. NoPos is always
// smaller than any other Pos value. The corresponding Position value
// for NoPos is the zero value for Position.
//
const NoPos Pos = 0
// IsValid returns true if the position is valid.
@ -85,6 +89,7 @@ func (p Pos) IsValid() bool {
// A File is a handle for a file belonging to a FileSet.
// A File has a name, size, and line offset table.
//
type File struct {
set *FileSet
name string // file name as provided to AddFile
@ -122,6 +127,7 @@ func (f *File) LineCount() int {
// AddLine adds the line offset for a new line.
// The line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise the line offset is ignored.
//
func (f *File) AddLine(offset int) {
f.set.mutex.Lock()
if i := len(f.lines); (i == 0 || f.lines[i-1] < offset) && offset < f.size {
@ -137,6 +143,7 @@ func (f *File) AddLine(offset int) {
// Each line offset must be larger than the offset for the previous line
// and smaller than the file size; otherwise SetLines fails and returns
// false.
//
func (f *File) SetLines(lines []int) bool {
// verify validity of lines table
size := f.size
@ -190,6 +197,7 @@ type lineInfo struct {
//
// AddLineInfo is typically used to register alternative position
// information for //line filename:line comments in source files.
//
func (f *File) AddLineInfo(offset int, filename string, line int) {
f.set.mutex.Lock()
if i := len(f.infos); i == 0 || f.infos[i-1].Offset < offset && offset < f.size {
@ -201,6 +209,7 @@ func (f *File) AddLineInfo(offset int, filename string, line int) {
// Pos returns the Pos value for the given file offset;
// the offset must be <= f.Size().
// f.Pos(f.Offset(p)) == p.
//
func (f *File) Pos(offset int) Pos {
if offset > f.size {
panic("illegal file offset")
@ -211,6 +220,7 @@ func (f *File) Pos(offset int) Pos {
// Offset returns the offset for the given file position p;
// p must be a valid Pos value in that file.
// f.Offset(f.Pos(offset)) == offset.
//
func (f *File) Offset(p Pos) int {
if int(p) < f.base || int(p) > f.base+f.size {
panic("illegal Pos value")
@ -220,6 +230,7 @@ func (f *File) Offset(p Pos) int {
// Line returns the line number for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Line(p Pos) int {
// TODO(gri) this can be implemented much more efficiently
return f.Position(p).Line
@ -257,6 +268,7 @@ func (f *File) position(p Pos) (pos Position) {
// Position returns the Position value for the given file position p;
// p must be a Pos value in that file or NoPos.
//
func (f *File) Position(p Pos) (pos Position) {
if p != NoPos {
if int(p) < f.base || int(p) > f.base+f.size {
@ -273,6 +285,7 @@ func (f *File) Position(p Pos) (pos Position) {
// A FileSet represents a set of source files.
// Methods of file sets are synchronized; multiple goroutines
// may invoke them concurrently.
//
type FileSet struct {
mutex sync.RWMutex // protects the file set
base int // base offset for the next file
@ -289,6 +302,7 @@ func NewFileSet() *FileSet {
// Base returns the minimum base offset that must be provided to
// AddFile when adding the next file.
//
func (s *FileSet) Base() int {
s.mutex.RLock()
b := s.base
@ -311,6 +325,7 @@ func (s *FileSet) Base() int {
// with offs in the range [0, size] and thus p in the range [base, base+size].
// For convenience, File.Pos may be used to create file-specific position
// values from a file offset.
//
func (s *FileSet) AddFile(filename string, base, size int) *File {
s.mutex.Lock()
defer s.mutex.Unlock()
@ -332,6 +347,7 @@ func (s *FileSet) AddFile(filename string, base, size int) *File {
// Iterate calls f for the files in the file set in the order they were added
// until f returns false.
//
func (s *FileSet) Iterate(f func(*File) bool) {
for i := 0; ; i++ {
var file *File
@ -370,6 +386,7 @@ func (s *FileSet) file(p Pos) *File {
// File returns the file that contains the position p.
// If no such file is found (for instance for p == NoPos),
// the result is nil.
//
func (s *FileSet) File(p Pos) (f *File) {
if p != NoPos {
s.mutex.RLock()

View File

@ -7,6 +7,7 @@
//
// Note that the API for the token package may change to accommodate new
// features or implementation changes in gcfg.
//
package token
import "strconv"
@ -57,6 +58,7 @@ var tokens = [...]string{
// sequence (e.g., for the token ASSIGN, the string is "="). For all other
// tokens the string corresponds to the token constant name (e.g. for the
// token IDENT, the string is "IDENT").
//
func (tok Token) String() string {
s := ""
if 0 <= tok && tok < Token(len(tokens)) {
@@ -72,8 +74,10 @@ func (tok Token) String() string {
// IsLiteral returns true for tokens corresponding to identifiers
// and basic type literals; it returns false otherwise.
//
func (tok Token) IsLiteral() bool { return literal_beg < tok && tok < literal_end }
// IsOperator returns true for tokens corresponding to operators and
// delimiters; it returns false otherwise.
//
func (tok Token) IsOperator() bool { return operator_beg < tok && tok < operator_end }
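
Put together, the behaviour documented above looks like this; a small sketch in which the vendored import path and the exported ASSIGN/IDENT constants are assumed from the doc comments:

package main

import (
	"fmt"

	"github.com/go-git/gcfg/token" // assumed path of the vendored package
)

func main() {
	fmt.Println(token.ASSIGN.String())     // "="
	fmt.Println(token.IDENT.String())      // "IDENT"
	fmt.Println(token.IDENT.IsLiteral())   // true
	fmt.Println(token.ASSIGN.IsOperator()) // true
}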

View File

@@ -25,7 +25,7 @@ type Memory struct {
tempCount int
}
// New returns a new Memory filesystem.
//New returns a new Memory filesystem.
func New() billy.Filesystem {
fs := &Memory{s: newStorage()}
return chroot.New(fs, string(separator))
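
A minimal sketch of using the returned in-memory filesystem, assuming the go-billy v5 module path:

package main

import (
	"fmt"

	"github.com/go-git/go-billy/v5/memfs"
)

func main() {
	fs := memfs.New()
	f, err := fs.Create("hello.txt")
	if err != nil {
		panic(err)
	}
	f.Write([]byte("hi"))
	f.Close()

	fi, _ := fs.Stat("hello.txt")
	fmt.Println(fi.Size()) // 2
}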

View File

@@ -46,7 +46,7 @@ func walk(fs billy.Filesystem, path string, info os.FileInfo, walkFn filepath.Wa
return nil
}
// Walk walks the file tree rooted at root, calling fn for each file or
// Walk walks the file tree rooted at root, calling fn for each file or
// directory in the tree, including root. All errors that arise visiting files
// and directories are filtered by fn: see the WalkFunc documentation for
// details.
@@ -54,7 +54,7 @@ func walk(fs billy.Filesystem, path string, info os.FileInfo, walkFn filepath.Wa
// The files are walked in lexical order, which makes the output deterministic
// but requires Walk to read an entire directory into memory before proceeding
// to walk that directory. Walk does not follow symbolic links.
//
//
// Function adapted from https://github.com/golang/go/blob/3b770f2ccb1fa6fecc22ea822a19447b10b70c5c/src/path/filepath/path.go#L500
func Walk(fs billy.Filesystem, root string, walkFn filepath.WalkFunc) error {
info, err := fs.Lstat(root)
@@ -63,10 +63,10 @@ func Walk(fs billy.Filesystem, root string, walkFn filepath.WalkFunc) error {
} else {
err = walk(fs, root, info, walkFn)
}
if err == filepath.SkipDir {
return nil
}
return err
}
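
A short usage sketch of Walk over an in-memory filesystem; the memfs and util package paths are assumed from go-billy v5:

package main

import (
	"fmt"
	"os"

	"github.com/go-git/go-billy/v5/memfs"
	"github.com/go-git/go-billy/v5/util"
)

func main() {
	fs := memfs.New()
	util.WriteFile(fs, "a/b.txt", []byte("x"), 0644)

	util.Walk(fs, "/", func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		fmt.Println(path) // e.g. "/", "/a", "/a/b.txt"
		return nil
	})
}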

View File

@@ -1,121 +1,122 @@
// Package config implements encoding and decoding of git config files.
//
// Configuration File
// ------------------
// Configuration File
// ------------------
//
// The Git configuration file contains a number of variables that affect
// the Git commands' behavior. The `.git/config` file in each repository
// is used to store the configuration for that repository, and
// `$HOME/.gitconfig` is used to store a per-user configuration as
// fallback values for the `.git/config` file. The file `/etc/gitconfig`
// can be used to store a system-wide default configuration.
// The Git configuration file contains a number of variables that affect
// the Git commands' behavior. The `.git/config` file in each repository
// is used to store the configuration for that repository, and
// `$HOME/.gitconfig` is used to store a per-user configuration as
// fallback values for the `.git/config` file. The file `/etc/gitconfig`
// can be used to store a system-wide default configuration.
//
// The configuration variables are used by both the Git plumbing
// and the porcelains. The variables are divided into sections, wherein
// the fully qualified variable name of the variable itself is the last
// dot-separated segment and the section name is everything before the last
// dot. The variable names are case-insensitive, allow only alphanumeric
// characters and `-`, and must start with an alphabetic character. Some
// variables may appear multiple times; we say then that the variable is
// multivalued.
// The configuration variables are used by both the Git plumbing
// and the porcelains. The variables are divided into sections, wherein
// the fully qualified variable name of the variable itself is the last
// dot-separated segment and the section name is everything before the last
// dot. The variable names are case-insensitive, allow only alphanumeric
// characters and `-`, and must start with an alphabetic character. Some
// variables may appear multiple times; we say then that the variable is
// multivalued.
//
// Syntax
// ~~~~~~
// Syntax
// ~~~~~~
//
// The syntax is fairly flexible and permissive; whitespaces are mostly
// ignored. The '#' and ';' characters begin comments to the end of line,
// blank lines are ignored.
// The syntax is fairly flexible and permissive; whitespaces are mostly
// ignored. The '#' and ';' characters begin comments to the end of line,
// blank lines are ignored.
//
// The file consists of sections and variables. A section begins with
// the name of the section in square brackets and continues until the next
// section begins. Section names are case-insensitive. Only alphanumeric
// characters, `-` and `.` are allowed in section names. Each variable
// must belong to some section, which means that there must be a section
// header before the first setting of a variable.
// The file consists of sections and variables. A section begins with
// the name of the section in square brackets and continues until the next
// section begins. Section names are case-insensitive. Only alphanumeric
// characters, `-` and `.` are allowed in section names. Each variable
// must belong to some section, which means that there must be a section
// header before the first setting of a variable.
//
// Sections can be further divided into subsections. To begin a subsection
// put its name in double quotes, separated by space from the section name,
// in the section header, like in the example below:
// Sections can be further divided into subsections. To begin a subsection
// put its name in double quotes, separated by space from the section name,
// in the section header, like in the example below:
//
// --------
// [section "subsection"]
// --------
// [section "subsection"]
//
// --------
// --------
//
// Subsection names are case sensitive and can contain any characters except
// newline (doublequote `"` and backslash can be included by escaping them
// as `\"` and `\\`, respectively). Section headers cannot span multiple
// lines. Variables may belong directly to a section or to a given subsection.
// You can have `[section]` if you have `[section "subsection"]`, but you
// don't need to.
// Subsection names are case sensitive and can contain any characters except
// newline (doublequote `"` and backslash can be included by escaping them
// as `\"` and `\\`, respectively). Section headers cannot span multiple
// lines. Variables may belong directly to a section or to a given subsection.
// You can have `[section]` if you have `[section "subsection"]`, but you
// don't need to.
//
// There is also a deprecated `[section.subsection]` syntax. With this
// syntax, the subsection name is converted to lower-case and is also
// compared case sensitively. These subsection names follow the same
// restrictions as section names.
// There is also a deprecated `[section.subsection]` syntax. With this
// syntax, the subsection name is converted to lower-case and is also
// compared case sensitively. These subsection names follow the same
// restrictions as section names.
//
// All the other lines (and the remainder of the line after the section
// header) are recognized as setting variables, in the form
// 'name = value' (or just 'name', which is a short-hand to say that
// the variable is the boolean "true").
// The variable names are case-insensitive, allow only alphanumeric characters
// and `-`, and must start with an alphabetic character.
// All the other lines (and the remainder of the line after the section
// header) are recognized as setting variables, in the form
// 'name = value' (or just 'name', which is a short-hand to say that
// the variable is the boolean "true").
// The variable names are case-insensitive, allow only alphanumeric characters
// and `-`, and must start with an alphabetic character.
//
// A line that defines a value can be continued to the next line by
// ending it with a `\`; the backquote and the end-of-line are
// stripped. Leading whitespaces after 'name =', the remainder of the
// line after the first comment character '#' or ';', and trailing
// whitespaces of the line are discarded unless they are enclosed in
// double quotes. Internal whitespaces within the value are retained
// verbatim.
// A line that defines a value can be continued to the next line by
// ending it with a `\`; the backquote and the end-of-line are
// stripped. Leading whitespaces after 'name =', the remainder of the
// line after the first comment character '#' or ';', and trailing
// whitespaces of the line are discarded unless they are enclosed in
// double quotes. Internal whitespaces within the value are retained
// verbatim.
//
// Inside double quotes, double quote `"` and backslash `\` characters
// must be escaped: use `\"` for `"` and `\\` for `\`.
// Inside double quotes, double quote `"` and backslash `\` characters
// must be escaped: use `\"` for `"` and `\\` for `\`.
//
// The following escape sequences (beside `\"` and `\\`) are recognized:
// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB)
// and `\b` for backspace (BS). Other char escape sequences (including octal
// escape sequences) are invalid.
// The following escape sequences (beside `\"` and `\\`) are recognized:
// `\n` for newline character (NL), `\t` for horizontal tabulation (HT, TAB)
// and `\b` for backspace (BS). Other char escape sequences (including octal
// escape sequences) are invalid.
//
// Includes
// ~~~~~~~~
// Includes
// ~~~~~~~~
//
// You can include one config file from another by setting the special
// `include.path` variable to the name of the file to be included. The
// variable takes a pathname as its value, and is subject to tilde
// expansion.
// You can include one config file from another by setting the special
// `include.path` variable to the name of the file to be included. The
// variable takes a pathname as its value, and is subject to tilde
// expansion.
//
// The included file is expanded immediately, as if its contents had been
// found at the location of the include directive. If the value of the
// `include.path` variable is a relative path, the path is considered to be
// relative to the configuration file in which the include directive was
// found. See below for examples.
// The included file is expanded immediately, as if its contents had been
// found at the location of the include directive. If the value of the
// `include.path` variable is a relative path, the path is considered to be
// relative to the configuration file in which the include directive was
// found. See below for examples.
//
//
// Example
// ~~~~~~~
// Example
// ~~~~~~~
//
// # Core variables
// [core]
// ; Don't trust file modes
// filemode = false
// # Core variables
// [core]
// ; Don't trust file modes
// filemode = false
//
// # Our diff algorithm
// [diff]
// external = /usr/local/bin/diff-wrapper
// renames = true
// # Our diff algorithm
// [diff]
// external = /usr/local/bin/diff-wrapper
// renames = true
//
// [branch "devel"]
// remote = origin
// merge = refs/heads/devel
// [branch "devel"]
// remote = origin
// merge = refs/heads/devel
//
// # Proxy settings
// [core]
// gitProxy="ssh" for "kernel.org"
// gitProxy=default-proxy ; for the rest
// # Proxy settings
// [core]
// gitProxy="ssh" for "kernel.org"
// gitProxy=default-proxy ; for the rest
//
// [include]
// path = /path/to/foo.inc ; include by absolute path
// path = foo ; expand "foo" relative to the current file
// path = ~/foo ; expand "foo" in your `$HOME` directory
//
// [include]
// path = /path/to/foo.inc ; include by absolute path
// path = foo ; expand "foo" relative to the current file
// path = ~/foo ; expand "foo" in your `$HOME` directory
package config
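
As a quick illustration of the syntax above, a sketch of decoding with this package's Decoder, assuming the usual plumbing/format/config import path:

package main

import (
	"fmt"
	"strings"

	"github.com/go-git/go-git/v5/plumbing/format/config"
)

func main() {
	raw := "[core]\n\tfilemode = false\n[branch \"devel\"]\n\tremote = origin\n"

	cfg := config.New()
	if err := config.NewDecoder(strings.NewReader(raw)).Decode(cfg); err != nil {
		panic(err)
	}

	fmt.Println(cfg.Section("core").Option("filemode"))                     // false
	fmt.Println(cfg.Section("branch").Subsection("devel").Option("remote")) // origin
}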

View File

@@ -13,9 +13,8 @@ type Encoder struct {
var (
subsectionReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`)
valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`)
valueReplacer = strings.NewReplacer(`"`, `\"`, `\`, `\\`, "\n", `\n`, "\t", `\t`, "\b", `\b`)
)
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w}
@@ -64,7 +63,7 @@ func (e *Encoder) encodeOptions(opts Options) error {
for _, o := range opts {
var value string
if strings.ContainsAny(o.Value, "#;\"\t\n\\") || strings.HasPrefix(o.Value, " ") || strings.HasSuffix(o.Value, " ") {
value = `"` + valueReplacer.Replace(o.Value) + `"`
value = `"`+valueReplacer.Replace(o.Value)+`"`
} else {
value = o.Value
}

View File

@@ -34,7 +34,7 @@ func (opts Options) GoString() string {
// Get gets the value for the given key if set,
// otherwise it returns the empty string.
//
// # Note that there is no difference
// Note that there is no difference
//
// This matches git behaviour since git v1.8.1-rc1,
// if there are multiple definitions of a key, the

View File

@@ -12,7 +12,7 @@ import (
// put its name in double quotes, separated by space from the section name,
// in the section header, like in the example below:
//
// [section "subsection"]
// [section "subsection"]
//
// All the other lines (and the remainder of the line after the section header)
// are recognized as option variables, in the form "name = value" (or just name,
@@ -20,11 +20,12 @@ import (
// The variable names are case-insensitive, allow only alphanumeric characters
// and -, and must start with an alphabetic character:
//
// [section "subsection1"]
// option1 = value1
// option2
// [section "subsection2"]
// option3 = value2
// [section "subsection1"]
// option1 = value1
// option2
// [section "subsection2"]
// option3 = value2
//
type Section struct {
Name string
Options Options

View File

@@ -3,68 +3,68 @@
// priorities. It support all pattern formats as specified in the original gitignore
// documentation, copied below:
//
// Pattern format
// ==============
// Pattern format
// ==============
//
// - A blank line matches no files, so it can serve as a separator for readability.
// - A blank line matches no files, so it can serve as a separator for readability.
//
// - A line starting with # serves as a comment. Put a backslash ("\") in front of
// the first hash for patterns that begin with a hash.
// - A line starting with # serves as a comment. Put a backslash ("\") in front of
// the first hash for patterns that begin with a hash.
//
// - Trailing spaces are ignored unless they are quoted with backslash ("\").
// - Trailing spaces are ignored unless they are quoted with backslash ("\").
//
// - An optional prefix "!" which negates the pattern; any matching file excluded
// by a previous pattern will become included again. It is not possible to
// re-include a file if a parent directory of that file is excluded.
// Git doesnt list excluded directories for performance reasons, so
// any patterns on contained files have no effect, no matter where they are
// defined. Put a backslash ("\") in front of the first "!" for patterns
// that begin with a literal "!", for example, "\!important!.txt".
// - An optional prefix "!" which negates the pattern; any matching file excluded
// by a previous pattern will become included again. It is not possible to
// re-include a file if a parent directory of that file is excluded.
// Git doesnt list excluded directories for performance reasons, so
// any patterns on contained files have no effect, no matter where they are
// defined. Put a backslash ("\") in front of the first "!" for patterns
// that begin with a literal "!", for example, "\!important!.txt".
//
// - If the pattern ends with a slash, it is removed for the purpose of the
// following description, but it would only find a match with a directory.
// In other words, foo/ will match a directory foo and paths underneath it,
// but will not match a regular file or a symbolic link foo (this is consistent
// with the way how pathspec works in general in Git).
// - If the pattern ends with a slash, it is removed for the purpose of the
// following description, but it would only find a match with a directory.
// In other words, foo/ will match a directory foo and paths underneath it,
// but will not match a regular file or a symbolic link foo (this is consistent
// with the way how pathspec works in general in Git).
//
// - If the pattern does not contain a slash /, Git treats it as a shell glob
// pattern and checks for a match against the pathname relative to the location
// of the .gitignore file (relative to the toplevel of the work tree if not
// from a .gitignore file).
// - If the pattern does not contain a slash /, Git treats it as a shell glob
// pattern and checks for a match against the pathname relative to the location
// of the .gitignore file (relative to the toplevel of the work tree if not
// from a .gitignore file).
//
// - Otherwise, Git treats the pattern as a shell glob suitable for consumption
// by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will
// not match a / in the pathname. For example, "Documentation/*.html" matches
// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or
// "tools/perf/Documentation/perf.html".
// - Otherwise, Git treats the pattern as a shell glob suitable for consumption
// by fnmatch(3) with the FNM_PATHNAME flag: wildcards in the pattern will
// not match a / in the pathname. For example, "Documentation/*.html" matches
// "Documentation/git.html" but not "Documentation/ppc/ppc.html" or
// "tools/perf/Documentation/perf.html".
//
// - A leading slash matches the beginning of the pathname. For example,
// "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c".
// - A leading slash matches the beginning of the pathname. For example,
// "/*.c" matches "cat-file.c" but not "mozilla-sha1/sha1.c".
//
// Two consecutive asterisks ("**") in patterns matched against full pathname
// may have special meaning:
// Two consecutive asterisks ("**") in patterns matched against full pathname
// may have special meaning:
//
// - A leading "**" followed by a slash means match in all directories.
// For example, "**/foo" matches file or directory "foo" anywhere, the same as
// pattern "foo". "**/foo/bar" matches file or directory "bar"
// anywhere that is directly under directory "foo".
// - A leading "**" followed by a slash means match in all directories.
// For example, "**/foo" matches file or directory "foo" anywhere, the same as
// pattern "foo". "**/foo/bar" matches file or directory "bar"
// anywhere that is directly under directory "foo".
//
// - A trailing "/**" matches everything inside. For example, "abc/**" matches
// all files inside directory "abc", relative to the location of the
// .gitignore file, with infinite depth.
// - A trailing "/**" matches everything inside. For example, "abc/**" matches
// all files inside directory "abc", relative to the location of the
// .gitignore file, with infinite depth.
//
// - A slash followed by two consecutive asterisks then a slash matches
// zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b",
// "a/x/y/b" and so on.
// - A slash followed by two consecutive asterisks then a slash matches
// zero or more directories. For example, "a/**/b" matches "a/b", "a/x/b",
// "a/x/y/b" and so on.
//
// - Other consecutive asterisks are considered invalid.
// - Other consecutive asterisks are considered invalid.
//
// Copyright and license
// =====================
// Copyright and license
// =====================
//
// Copyright (c) Oleg Sklyar, Silvertern and source{d}
// Copyright (c) Oleg Sklyar, Silvertern and source{d}
//
// The package code was donated to source{d} to include, modify and develop
// further as a part of the `go-git` project, release it on the license of
// the whole project or delete it from the project.
// The package code was donated to source{d} to include, modify and develop
// further as a part of the `go-git` project, release it on the license of
// the whole project or delete it from the project.
package gitignore
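
A small sketch of the negation rule above using this package's matcher; the import path is assumed:

package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing/format/gitignore"
)

func main() {
	ps := []gitignore.Pattern{
		gitignore.ParsePattern("*.o", nil),     // ignore all object files...
		gitignore.ParsePattern("!keep.o", nil), // ...but re-include this one
	}
	m := gitignore.NewMatcher(ps)

	fmt.Println(m.Match([]string{"build", "main.o"}, false)) // true (ignored)
	fmt.Println(m.Match([]string{"build", "keep.o"}, false)) // false (re-included)
}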

View File

@@ -1,127 +1,127 @@
// Package idxfile implements encoding and decoding of packfile idx files.
//
// == Original (version 1) pack-*.idx files have the following format:
// == Original (version 1) pack-*.idx files have the following format:
//
// - The header consists of 256 4-byte network byte order
// integers. N-th entry of this table records the number of
// objects in the corresponding pack, the first byte of whose
// object name is less than or equal to N. This is called the
// 'first-level fan-out' table.
// - The header consists of 256 4-byte network byte order
// integers. N-th entry of this table records the number of
// objects in the corresponding pack, the first byte of whose
// object name is less than or equal to N. This is called the
// 'first-level fan-out' table.
//
// - The header is followed by sorted 24-byte entries, one entry
// per object in the pack. Each entry is:
// - The header is followed by sorted 24-byte entries, one entry
// per object in the pack. Each entry is:
//
// 4-byte network byte order integer, recording where the
// object is stored in the packfile as the offset from the
// beginning.
// 4-byte network byte order integer, recording where the
// object is stored in the packfile as the offset from the
// beginning.
//
// 20-byte object name.
// 20-byte object name.
//
// - The file is concluded with a trailer:
// - The file is concluded with a trailer:
//
// A copy of the 20-byte SHA1 checksum at the end of
// corresponding packfile.
// A copy of the 20-byte SHA1 checksum at the end of
// corresponding packfile.
//
// 20-byte SHA1-checksum of all of the above.
// 20-byte SHA1-checksum of all of the above.
//
// Pack Idx file:
// Pack Idx file:
//
// -- +--------------------------------+
// fanout | fanout[0] = 2 (for example) |-.
// table +--------------------------------+ |
// | fanout[1] | |
// +--------------------------------+ |
// | fanout[2] | |
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
// | fanout[255] = total objects |---.
// -- +--------------------------------+ | |
// main | offset | | |
// index | object name 00XXXXXXXXXXXXXXXX | | |
// tab +--------------------------------+ | |
// | offset | | |
// | object name 00XXXXXXXXXXXXXXXX | | |
// +--------------------------------+<+ |
// .-| offset | |
// | | object name 01XXXXXXXXXXXXXXXX | |
// | +--------------------------------+ |
// | | offset | |
// | | object name 01XXXXXXXXXXXXXXXX | |
// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
// | | offset | |
// | | object name FFXXXXXXXXXXXXXXXX | |
// --| +--------------------------------+<--+
// trailer | | packfile checksum |
// | +--------------------------------+
// | | idxfile checksum |
// | +--------------------------------+
// .---------.
// |
// Pack file entry: <+
// -- +--------------------------------+
// fanout | fanout[0] = 2 (for example) |-.
// table +--------------------------------+ |
// | fanout[1] | |
// +--------------------------------+ |
// | fanout[2] | |
// ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
// | fanout[255] = total objects |---.
// -- +--------------------------------+ | |
// main | offset | | |
// index | object name 00XXXXXXXXXXXXXXXX | | |
// tab +--------------------------------+ | |
// | offset | | |
// | object name 00XXXXXXXXXXXXXXXX | | |
// +--------------------------------+<+ |
// .-| offset | |
// | | object name 01XXXXXXXXXXXXXXXX | |
// | +--------------------------------+ |
// | | offset | |
// | | object name 01XXXXXXXXXXXXXXXX | |
// | ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ |
// | | offset | |
// | | object name FFXXXXXXXXXXXXXXXX | |
// --| +--------------------------------+<--+
// trailer | | packfile checksum |
// | +--------------------------------+
// | | idxfile checksum |
// | +--------------------------------+
// .---------.
// |
// Pack file entry: <+
//
// packed object header:
// 1-byte size extension bit (MSB)
// type (next 3 bit)
// size0 (lower 4-bit)
// n-byte sizeN (as long as MSB is set, each 7-bit)
// size0..sizeN form 4+7+7+..+7 bit integer, size0
// is the least significant part, and sizeN is the
// most significant part.
// packed object data:
// If it is not DELTA, then deflated bytes (the size above
// is the size before compression).
// If it is REF_DELTA, then
// 20-byte base object name SHA1 (the size above is the
// size of the delta data that follows).
// delta data, deflated.
// If it is OFS_DELTA, then
// n-byte offset (see below) interpreted as a negative
// offset from the type-byte of the header of the
// ofs-delta entry (the size above is the size of
// the delta data that follows).
// delta data, deflated.
// packed object header:
// 1-byte size extension bit (MSB)
// type (next 3 bit)
// size0 (lower 4-bit)
// n-byte sizeN (as long as MSB is set, each 7-bit)
// size0..sizeN form 4+7+7+..+7 bit integer, size0
// is the least significant part, and sizeN is the
// most significant part.
// packed object data:
// If it is not DELTA, then deflated bytes (the size above
// is the size before compression).
// If it is REF_DELTA, then
// 20-byte base object name SHA1 (the size above is the
// size of the delta data that follows).
// delta data, deflated.
// If it is OFS_DELTA, then
// n-byte offset (see below) interpreted as a negative
// offset from the type-byte of the header of the
// ofs-delta entry (the size above is the size of
// the delta data that follows).
// delta data, deflated.
//
// offset encoding:
// n bytes with MSB set in all but the last one.
// The offset is then the number constructed by
// concatenating the lower 7 bit of each byte, and
// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
// to the result.
// offset encoding:
// n bytes with MSB set in all but the last one.
// The offset is then the number constructed by
// concatenating the lower 7 bit of each byte, and
// for n >= 2 adding 2^7 + 2^14 + ... + 2^(7*(n-1))
// to the result.
//
// == Version 2 pack-*.idx files support packs larger than 4 GiB, and
// have some other reorganizations. They have the format:
// == Version 2 pack-*.idx files support packs larger than 4 GiB, and
// have some other reorganizations. They have the format:
//
// - A 4-byte magic number '\377tOc' which is an unreasonable
// fanout[0] value.
// - A 4-byte magic number '\377tOc' which is an unreasonable
// fanout[0] value.
//
// - A 4-byte version number (= 2)
// - A 4-byte version number (= 2)
//
// - A 256-entry fan-out table just like v1.
// - A 256-entry fan-out table just like v1.
//
// - A table of sorted 20-byte SHA1 object names. These are
// packed together without offset values to reduce the cache
// footprint of the binary search for a specific object name.
// - A table of sorted 20-byte SHA1 object names. These are
// packed together without offset values to reduce the cache
// footprint of the binary search for a specific object name.
//
// - A table of 4-byte CRC32 values of the packed object data.
// This is new in v2 so compressed data can be copied directly
// from pack to pack during repacking without undetected
// data corruption.
// - A table of 4-byte CRC32 values of the packed object data.
// This is new in v2 so compressed data can be copied directly
// from pack to pack during repacking without undetected
// data corruption.
//
// - A table of 4-byte offset values (in network byte order).
// These are usually 31-bit pack file offsets, but large
// offsets are encoded as an index into the next table with
// the msbit set.
// - A table of 4-byte offset values (in network byte order).
// These are usually 31-bit pack file offsets, but large
// offsets are encoded as an index into the next table with
// the msbit set.
//
// - A table of 8-byte offset entries (empty for pack files less
// than 2 GiB). Pack files are organized with heavily used
// objects toward the front, so most object references should
// not need to refer to this table.
// - A table of 8-byte offset entries (empty for pack files less
// than 2 GiB). Pack files are organized with heavily used
// objects toward the front, so most object references should
// not need to refer to this table.
//
// - The same trailer as a v1 pack file:
// - The same trailer as a v1 pack file:
//
// A copy of the 20-byte SHA1 checksum at the end of
// corresponding packfile.
// A copy of the 20-byte SHA1 checksum at the end of
// corresponding packfile.
//
// 20-byte SHA1-checksum of all of the above.
// 20-byte SHA1-checksum of all of the above.
//
// Source:
// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-format.txt

View File

@@ -1,360 +1,360 @@
// Package index implements encoding and decoding of index format files.
//
// Git index format
// ================
// Git index format
// ================
//
// == The Git index file has the following format
// == The Git index file has the following format
//
// All binary numbers are in network byte order. Version 2 is described
// here unless stated otherwise.
// All binary numbers are in network byte order. Version 2 is described
// here unless stated otherwise.
//
// - A 12-byte header consisting of
// - A 12-byte header consisting of
//
// 4-byte signature:
// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache")
// 4-byte signature:
// The signature is { 'D', 'I', 'R', 'C' } (stands for "dircache")
//
// 4-byte version number:
// The current supported versions are 2, 3 and 4.
// 4-byte version number:
// The current supported versions are 2, 3 and 4.
//
// 32-bit number of index entries.
// 32-bit number of index entries.
//
// - A number of sorted index entries (see below).
// - A number of sorted index entries (see below).
//
// - Extensions
// - Extensions
//
// Extensions are identified by signature. Optional extensions can
// be ignored if Git does not understand them.
// Extensions are identified by signature. Optional extensions can
// be ignored if Git does not understand them.
//
// Git currently supports cached tree and resolve undo extensions.
// Git currently supports cached tree and resolve undo extensions.
//
// 4-byte extension signature. If the first byte is 'A'..'Z' the
// extension is optional and can be ignored.
// 4-byte extension signature. If the first byte is 'A'..'Z' the
// extension is optional and can be ignored.
//
// 32-bit size of the extension
// 32-bit size of the extension
//
// Extension data
// Extension data
//
// - 160-bit SHA-1 over the content of the index file before this
// checksum.
// - 160-bit SHA-1 over the content of the index file before this
// checksum.
//
// == Index entry
// == Index entry
//
// Index entries are sorted in ascending order on the name field,
// interpreted as a string of unsigned bytes (i.e. memcmp() order, no
// localization, no special casing of directory separator '/'). Entries
// with the same name are sorted by their stage field.
// Index entries are sorted in ascending order on the name field,
// interpreted as a string of unsigned bytes (i.e. memcmp() order, no
// localization, no special casing of directory separator '/'). Entries
// with the same name are sorted by their stage field.
//
// 32-bit ctime seconds, the last time a file's metadata changed
// this is stat(2) data
// 32-bit ctime seconds, the last time a file's metadata changed
// this is stat(2) data
//
// 32-bit ctime nanosecond fractions
// this is stat(2) data
// 32-bit ctime nanosecond fractions
// this is stat(2) data
//
// 32-bit mtime seconds, the last time a file's data changed
// this is stat(2) data
// 32-bit mtime seconds, the last time a file's data changed
// this is stat(2) data
//
// 32-bit mtime nanosecond fractions
// this is stat(2) data
// 32-bit mtime nanosecond fractions
// this is stat(2) data
//
// 32-bit dev
// this is stat(2) data
// 32-bit dev
// this is stat(2) data
//
// 32-bit ino
// this is stat(2) data
// 32-bit ino
// this is stat(2) data
//
// 32-bit mode, split into (high to low bits)
// 32-bit mode, split into (high to low bits)
//
// 4-bit object type
// valid values in binary are 1000 (regular file), 1010 (symbolic link)
// and 1110 (gitlink)
// 4-bit object type
// valid values in binary are 1000 (regular file), 1010 (symbolic link)
// and 1110 (gitlink)
//
// 3-bit unused
// 3-bit unused
//
// 9-bit unix permission. Only 0755 and 0644 are valid for regular files.
// Symbolic links and gitlinks have value 0 in this field.
// 9-bit unix permission. Only 0755 and 0644 are valid for regular files.
// Symbolic links and gitlinks have value 0 in this field.
//
// 32-bit uid
// this is stat(2) data
// 32-bit uid
// this is stat(2) data
//
// 32-bit gid
// this is stat(2) data
// 32-bit gid
// this is stat(2) data
//
// 32-bit file size
// This is the on-disk size from stat(2), truncated to 32-bit.
// 32-bit file size
// This is the on-disk size from stat(2), truncated to 32-bit.
//
// 160-bit SHA-1 for the represented object
// 160-bit SHA-1 for the represented object
//
// A 16-bit 'flags' field split into (high to low bits)
// A 16-bit 'flags' field split into (high to low bits)
//
// 1-bit assume-valid flag
// 1-bit assume-valid flag
//
// 1-bit extended flag (must be zero in version 2)
// 1-bit extended flag (must be zero in version 2)
//
// 2-bit stage (during merge)
// 2-bit stage (during merge)
//
// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF
// is stored in this field.
// 12-bit name length if the length is less than 0xFFF; otherwise 0xFFF
// is stored in this field.
//
// (Version 3 or later) A 16-bit field, only applicable if the
// "extended flag" above is 1, split into (high to low bits).
// (Version 3 or later) A 16-bit field, only applicable if the
// "extended flag" above is 1, split into (high to low bits).
//
// 1-bit reserved for future
// 1-bit reserved for future
//
// 1-bit skip-worktree flag (used by sparse checkout)
// 1-bit skip-worktree flag (used by sparse checkout)
//
// 1-bit intent-to-add flag (used by "git add -N")
// 1-bit intent-to-add flag (used by "git add -N")
//
// 13-bit unused, must be zero
// 13-bit unused, must be zero
//
// Entry path name (variable length) relative to top level directory
// (without leading slash). '/' is used as path separator. The special
// path components ".", ".." and ".git" (without quotes) are disallowed.
// Trailing slash is also disallowed.
// Entry path name (variable length) relative to top level directory
// (without leading slash). '/' is used as path separator. The special
// path components ".", ".." and ".git" (without quotes) are disallowed.
// Trailing slash is also disallowed.
//
// The exact encoding is undefined, but the '.' and '/' characters
// are encoded in 7-bit ASCII and the encoding cannot contain a NUL
// byte (iow, this is a UNIX pathname).
// The exact encoding is undefined, but the '.' and '/' characters
// are encoded in 7-bit ASCII and the encoding cannot contain a NUL
// byte (iow, this is a UNIX pathname).
//
// (Version 4) In version 4, the entry path name is prefix-compressed
// relative to the path name for the previous entry (the very first
// entry is encoded as if the path name for the previous entry is an
// empty string). At the beginning of an entry, an integer N in the
// variable width encoding (the same encoding as the offset is encoded
// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed
// by a NUL-terminated string S. Removing N bytes from the end of the
// path name for the previous entry, and replacing it with the string S
// yields the path name for this entry.
// (Version 4) In version 4, the entry path name is prefix-compressed
// relative to the path name for the previous entry (the very first
// entry is encoded as if the path name for the previous entry is an
// empty string). At the beginning of an entry, an integer N in the
// variable width encoding (the same encoding as the offset is encoded
// for OFS_DELTA pack entries; see pack-format.txt) is stored, followed
// by a NUL-terminated string S. Removing N bytes from the end of the
// path name for the previous entry, and replacing it with the string S
// yields the path name for this entry.
//
// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes
// while keeping the name NUL-terminated.
// 1-8 nul bytes as necessary to pad the entry to a multiple of eight bytes
// while keeping the name NUL-terminated.
//
// (Version 4) In version 4, the padding after the pathname does not
// exist.
// (Version 4) In version 4, the padding after the pathname does not
// exist.
//
// Interpretation of index entries in split index mode is completely
// different. See below for details.
// Interpretation of index entries in split index mode is completely
// different. See below for details.
//
// == Extensions
// == Extensions
//
// === Cached tree
// === Cached tree
//
// Cached tree extension contains pre-computed hashes for trees that can
// be derived from the index. It helps speed up tree object generation
// from index for a new commit.
// Cached tree extension contains pre-computed hashes for trees that can
// be derived from the index. It helps speed up tree object generation
// from index for a new commit.
//
// When a path is updated in index, the path must be invalidated and
// removed from tree cache.
// When a path is updated in index, the path must be invalidated and
// removed from tree cache.
//
// The signature for this extension is { 'T', 'R', 'E', 'E' }.
// The signature for this extension is { 'T', 'R', 'E', 'E' }.
//
// A series of entries fill the entire extension; each of which
// consists of:
// A series of entries fill the entire extension; each of which
// consists of:
//
// - NUL-terminated path component (relative to its parent directory);
// - NUL-terminated path component (relative to its parent directory);
//
// - ASCII decimal number of entries in the index that is covered by the
// tree this entry represents (entry_count);
// - ASCII decimal number of entries in the index that is covered by the
// tree this entry represents (entry_count);
//
// - A space (ASCII 32);
// - A space (ASCII 32);
//
// - ASCII decimal number that represents the number of subtrees this
// tree has;
// - ASCII decimal number that represents the number of subtrees this
// tree has;
//
// - A newline (ASCII 10); and
// - A newline (ASCII 10); and
//
// - 160-bit object name for the object that would result from writing
// this span of index as a tree.
// - 160-bit object name for the object that would result from writing
// this span of index as a tree.
//
// An entry can be in an invalidated state and is represented by having
// a negative number in the entry_count field. In this case, there is no
// object name and the next entry starts immediately after the newline.
// When writing an invalid entry, -1 should always be used as entry_count.
// An entry can be in an invalidated state and is represented by having
// a negative number in the entry_count field. In this case, there is no
// object name and the next entry starts immediately after the newline.
// When writing an invalid entry, -1 should always be used as entry_count.
//
// The entries are written out in the top-down, depth-first order. The
// first entry represents the root level of the repository, followed by the
// first subtree--let's call this A--of the root level (with its name
// relative to the root level), followed by the first subtree of A (with
// its name relative to A), ...
// The entries are written out in the top-down, depth-first order. The
// first entry represents the root level of the repository, followed by the
// first subtree--let's call this A--of the root level (with its name
// relative to the root level), followed by the first subtree of A (with
// its name relative to A), ...
//
// === Resolve undo
// === Resolve undo
//
// A conflict is represented in the index as a set of higher stage entries.
// When a conflict is resolved (e.g. with "git add path"), these higher
// stage entries will be removed and a stage-0 entry with proper resolution
// is added.
// A conflict is represented in the index as a set of higher stage entries.
// When a conflict is resolved (e.g. with "git add path"), these higher
// stage entries will be removed and a stage-0 entry with proper resolution
// is added.
//
// When these higher stage entries are removed, they are saved in the
// resolve undo extension, so that conflicts can be recreated (e.g. with
// "git checkout -m"), in case users want to redo a conflict resolution
// from scratch.
// When these higher stage entries are removed, they are saved in the
// resolve undo extension, so that conflicts can be recreated (e.g. with
// "git checkout -m"), in case users want to redo a conflict resolution
// from scratch.
//
// The signature for this extension is { 'R', 'E', 'U', 'C' }.
// The signature for this extension is { 'R', 'E', 'U', 'C' }.
//
// A series of entries fill the entire extension; each of which
// consists of:
// A series of entries fill the entire extension; each of which
// consists of:
//
// - NUL-terminated pathname the entry describes (relative to the root of
// the repository, i.e. full pathname);
// - NUL-terminated pathname the entry describes (relative to the root of
// the repository, i.e. full pathname);
//
// - Three NUL-terminated ASCII octal numbers, entry mode of entries in
// stage 1 to 3 (a missing stage is represented by "0" in this field);
// and
// - Three NUL-terminated ASCII octal numbers, entry mode of entries in
// stage 1 to 3 (a missing stage is represented by "0" in this field);
// and
//
// - At most three 160-bit object names of the entry in stages from 1 to 3
// (nothing is written for a missing stage).
// - At most three 160-bit object names of the entry in stages from 1 to 3
// (nothing is written for a missing stage).
//
// === Split index
// === Split index
//
// In split index mode, the majority of index entries could be stored
// in a separate file. This extension records the changes to be made on
// top of that to produce the final index.
// In split index mode, the majority of index entries could be stored
// in a separate file. This extension records the changes to be made on
// top of that to produce the final index.
//
// The signature for this extension is { 'l', 'i', 'n', 'k' }.
// The signature for this extension is { 'l', 'i', 'n', 'k' }.
//
// The extension consists of:
// The extension consists of:
//
// - 160-bit SHA-1 of the shared index file. The shared index file path
// is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the
// index does not require a shared index file.
// - 160-bit SHA-1 of the shared index file. The shared index file path
// is $GIT_DIR/sharedindex.<SHA-1>. If all 160 bits are zero, the
// index does not require a shared index file.
//
// - An ewah-encoded delete bitmap, each bit represents an entry in the
// shared index. If a bit is set, its corresponding entry in the
// shared index will be removed from the final index. Note, because
// a delete operation changes index entry positions, but we do need
// original positions in replace phase, it's best to just mark
// entries for removal, then do a mass deletion after replacement.
// - An ewah-encoded delete bitmap, each bit represents an entry in the
// shared index. If a bit is set, its corresponding entry in the
// shared index will be removed from the final index. Note, because
// a delete operation changes index entry positions, but we do need
// original positions in replace phase, it's best to just mark
// entries for removal, then do a mass deletion after replacement.
//
// - An ewah-encoded replace bitmap, each bit represents an entry in
// the shared index. If a bit is set, its corresponding entry in the
// shared index will be replaced with an entry in this index
// file. All replaced entries are stored in sorted order in this
// index. The first "1" bit in the replace bitmap corresponds to the
// first index entry, the second "1" bit to the second entry and so
// on. Replaced entries may have empty path names to save space.
// - An ewah-encoded replace bitmap, each bit represents an entry in
// the shared index. If a bit is set, its corresponding entry in the
// shared index will be replaced with an entry in this index
// file. All replaced entries are stored in sorted order in this
// index. The first "1" bit in the replace bitmap corresponds to the
// first index entry, the second "1" bit to the second entry and so
// on. Replaced entries may have empty path names to save space.
//
// The remaining index entries after replaced ones will be added to the
// final index. These added entries are also sorted by entry name then
// stage.
// The remaining index entries after replaced ones will be added to the
// final index. These added entries are also sorted by entry name then
// stage.
//
// == Untracked cache
// == Untracked cache
//
// Untracked cache saves the untracked file list and necessary data to
// verify the cache. The signature for this extension is { 'U', 'N',
// 'T', 'R' }.
// Untracked cache saves the untracked file list and necessary data to
// verify the cache. The signature for this extension is { 'U', 'N',
// 'T', 'R' }.
//
// The extension starts with
// The extension starts with
//
// - A sequence of NUL-terminated strings, preceded by the size of the
// sequence in variable width encoding. Each string describes the
// environment where the cache can be used.
// - A sequence of NUL-terminated strings, preceded by the size of the
// sequence in variable width encoding. Each string describes the
// environment where the cache can be used.
//
// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from
// ctime field until "file size".
// - Stat data of $GIT_DIR/info/exclude. See "Index entry" section from
// ctime field until "file size".
//
// - Stat data of plumbing.excludesfile
// - Stat data of plumbing.excludesfile
//
// - 32-bit dir_flags (see struct dir_struct)
// - 32-bit dir_flags (see struct dir_struct)
//
// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file
// does not exist.
// - 160-bit SHA-1 of $GIT_DIR/info/exclude. Null SHA-1 means the file
// does not exist.
//
// - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does
// not exist.
// - 160-bit SHA-1 of plumbing.excludesfile. Null SHA-1 means the file does
// not exist.
//
// - NUL-terminated string of per-dir exclude file name. This usually
// is ".gitignore".
// - NUL-terminated string of per-dir exclude file name. This usually
// is ".gitignore".
//
// - The number of following directory blocks, variable width
// encoding. If this number is zero, the extension ends here with a
// following NUL.
// - The number of following directory blocks, variable width
// encoding. If this number is zero, the extension ends here with a
// following NUL.
//
// - A number of directory blocks in depth-first-search order, each
// consists of
// - A number of directory blocks in depth-first-search order, each
// consists of
//
// - The number of untracked entries, variable width encoding.
// - The number of untracked entries, variable width encoding.
//
// - The number of sub-directory blocks, variable width encoding.
// - The number of sub-directory blocks, variable width encoding.
//
// - The directory name terminated by NUL.
// - The directory name terminated by NUL.
//
// - A number of untracked file/dir names terminated by NUL.
// - A number of untracked file/dir names terminated by NUL.
//
// The remaining data of each directory block is grouped by type:
// The remaining data of each directory block is grouped by type:
//
// - An ewah bitmap, the n-th bit marks whether the n-th directory has
// valid untracked cache entries.
// - An ewah bitmap, the n-th bit marks whether the n-th directory has
// valid untracked cache entries.
//
// - An ewah bitmap, the n-th bit records "check-only" bit of
// read_directory_recursive() for the n-th directory.
// - An ewah bitmap, the n-th bit records "check-only" bit of
// read_directory_recursive() for the n-th directory.
//
// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data
// is valid for the n-th directory and exists in the next data.
// - An ewah bitmap, the n-th bit indicates whether SHA-1 and stat data
// is valid for the n-th directory and exists in the next data.
//
// - An array of stat data. The n-th data corresponds with the n-th
// "one" bit in the previous ewah bitmap.
// - An array of stat data. The n-th data corresponds with the n-th
// "one" bit in the previous ewah bitmap.
//
// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit
// in the previous ewah bitmap.
// - An array of SHA-1. The n-th SHA-1 corresponds with the n-th "one" bit
// in the previous ewah bitmap.
//
// - One NUL.
// - One NUL.
//
// == File System Monitor cache
// == File System Monitor cache
//
// The file system monitor cache tracks files for which the core.fsmonitor
// hook has told us about changes. The signature for this extension is
// { 'F', 'S', 'M', 'N' }.
// The file system monitor cache tracks files for which the core.fsmonitor
// hook has told us about changes. The signature for this extension is
// { 'F', 'S', 'M', 'N' }.
//
// The extension starts with
// The extension starts with
//
// - 32-bit version number: the current supported version is 1.
// - 32-bit version number: the current supported version is 1.
//
// - 64-bit time: the extension data reflects all changes through the given
// time which is stored as the nanoseconds elapsed since midnight,
// January 1, 1970.
// - 64-bit time: the extension data reflects all changes through the given
// time which is stored as the nanoseconds elapsed since midnight,
// January 1, 1970.
//
// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
// - 32-bit bitmap size: the size of the CE_FSMONITOR_VALID bitmap.
//
// - An ewah bitmap, the n-th bit indicates whether the n-th index entry
// is not CE_FSMONITOR_VALID.
// - An ewah bitmap, the n-th bit indicates whether the n-th index entry
// is not CE_FSMONITOR_VALID.
//
// == End of Index Entry
// == End of Index Entry
//
// The End of Index Entry (EOIE) is used to locate the end of the variable
// length index entries and the beginning of the extensions. Code can take
// advantage of this to quickly locate the index extensions without having
// to parse through all of the index entries.
// The End of Index Entry (EOIE) is used to locate the end of the variable
// length index entries and the beginning of the extensions. Code can take
// advantage of this to quickly locate the index extensions without having
// to parse through all of the index entries.
//
// Because it must be able to be loaded before the variable length cache
// entries and other index extensions, this extension must be written last.
// The signature for this extension is { 'E', 'O', 'I', 'E' }.
// Because it must be able to be loaded before the variable length cache
// entries and other index extensions, this extension must be written last.
// The signature for this extension is { 'E', 'O', 'I', 'E' }.
//
// The extension consists of:
// The extension consists of:
//
// - 32-bit offset to the end of the index entries
// - 32-bit offset to the end of the index entries
//
// - 160-bit SHA-1 over the extension types and their sizes (but not
// their contents). E.g. if we have "TREE" extension that is N-bytes
// long, "REUC" extension that is M-bytes long, followed by "EOIE",
// then the hash would be:
// - 160-bit SHA-1 over the extension types and their sizes (but not
// their contents). E.g. if we have "TREE" extension that is N-bytes
// long, "REUC" extension that is M-bytes long, followed by "EOIE",
// then the hash would be:
//
// SHA-1("TREE" + <binary representation of N> +
// "REUC" + <binary representation of M>)
// SHA-1("TREE" + <binary representation of N> +
// "REUC" + <binary representation of M>)
//
// == Index Entry Offset Table
// == Index Entry Offset Table
//
// The Index Entry Offset Table (IEOT) is used to help address the CPU
// cost of loading the index by enabling multi-threading the process of
// converting cache entries from the on-disk format to the in-memory format.
// The signature for this extension is { 'I', 'E', 'O', 'T' }.
// The Index Entry Offset Table (IEOT) is used to help address the CPU
// cost of loading the index by enabling multi-threading the process of
// converting cache entries from the on-disk format to the in-memory format.
// The signature for this extension is { 'I', 'E', 'O', 'T' }.
//
// The extension consists of:
// The extension consists of:
//
// - 32-bit version (currently 1)
// - 32-bit version (currently 1)
//
// - A number of index offset entries each consisting of:
// - A number of index offset entries each consisting of:
//
// - 32-bit offset from the beginning of the file to the first cache entry
// in this block of entries.
// - 32-bit offset from the beginning of the file to the first cache entry
// in this block of entries.
//
// - 32-bit count of cache entries in this blockpackage index
// - 32-bit count of cache entries in this blockpackage index
package index
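
A standalone sketch of the 32-bit mode split described above; plain bit arithmetic, not this package's API:

package main

import "fmt"

// splitMode splits an index entry's mode into its 4-bit object type
// and 9-bit unix permission parts (the 3 bits in between are unused).
func splitMode(mode uint32) (objType, perm uint32) {
	objType = (mode >> 12) & 0xF // 1000 regular file, 1010 symlink, 1110 gitlink
	perm = mode & 0x1FF          // only 0755 and 0644 are valid for regular files
	return
}

func main() {
	t, p := splitMode(0100644)    // a regular file with mode 644
	fmt.Printf("%04b %o\n", t, p) // 1000 644
}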

View File

@@ -203,8 +203,8 @@ type ResolveUndoEntry struct {
// can take advantage of this to quickly locate the index extensions without
// having to parse through all of the index entries.
//
// Because it must be able to be loaded before the variable length cache
// entries and other index extensions, this extension must be written last.
// Because it must be able to be loaded before the variable length cache
// entries and other index extensions, this extension must be written last.
type EndOfIndexEntry struct {
// Offset to the end of the index entries
Offset uint32

View File

@@ -1,37 +1,38 @@
// Package packfile implements encoding and decoding of packfile format.
//
// == pack-*.pack files have the following format:
// == pack-*.pack files have the following format:
//
// - A header appears at the beginning and consists of the following:
// - A header appears at the beginning and consists of the following:
//
// 4-byte signature:
// The signature is: {'P', 'A', 'C', 'K'}
// 4-byte signature:
// The signature is: {'P', 'A', 'C', 'K'}
//
// 4-byte version number (network byte order):
// GIT currently accepts version number 2 or 3 but
// generates version 2 only.
// 4-byte version number (network byte order):
// GIT currently accepts version number 2 or 3 but
// generates version 2 only.
//
// 4-byte number of objects contained in the pack (network byte order)
// 4-byte number of objects contained in the pack (network byte order)
//
// Observation: we cannot have more than 4G versions ;-) and
// more than 4G objects in a pack.
// Observation: we cannot have more than 4G versions ;-) and
// more than 4G objects in a pack.
//
// - The header is followed by number of object entries, each of
// which looks like this:
// - The header is followed by number of object entries, each of
// which looks like this:
//
// (undeltified representation)
// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
// compressed data
// (undeltified representation)
// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
// compressed data
//
// (deltified representation)
// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
// 20-byte base object name
// compressed delta data
// (deltified representation)
// n-byte type and length (3-bit type, (n-1)*7+4-bit length)
// 20-byte base object name
// compressed delta data
//
// Observation: length of each object is encoded in a variable
// length format and is not constrained to 32-bit or anything.
// Observation: length of each object is encoded in a variable
// length format and is not constrained to 32-bit or anything.
//
// - The trailer records 20-byte SHA1 checksum of all of the above.
//
// - The trailer records 20-byte SHA1 checksum of all of the above.
//
// Source:
// https://www.kernel.org/pub/software/scm/git/docs/v1.7.5/technical/pack-protocol.txt

View File

@@ -24,18 +24,18 @@ var ErrUnsupportedObject = errors.New("unsupported object type")
// Object is returned when an object can be of any type. It is frequently used
// with a type cast to acquire the specific type of object:
//
// func process(obj Object) {
// switch o := obj.(type) {
// case *Commit:
// // o is a Commit
// case *Tree:
// // o is a Tree
// case *Blob:
// // o is a Blob
// case *Tag:
// // o is a Tag
// }
// }
// func process(obj Object) {
// switch o := obj.(type) {
// case *Commit:
// // o is a Commit
// case *Tree:
// // o is a Tree
// case *Blob:
// // o is a Blob
// case *Tag:
// // o is a Tag
// }
// }
//
// This interface is intentionally different from plumbing.EncodedObject, which
// is a lower level interface used by storage implementations to read and write

View File

@ -48,7 +48,6 @@ func NewReferenceUpdateRequest() *ReferenceUpdateRequest {
// - ofs-delta
// - ref-delta
// - delete-refs
//
// It leaves up to the user to add the following capabilities later:
// - atomic
// - ofs-delta

View File

@ -80,9 +80,10 @@ func ReadUntilFromBufioReader(r *bufio.Reader, delim byte) ([]byte, error) {
//
// This is how the offset is saved in C:
//
//	dheader[pos] = ofs & 127;
//	while (ofs >>= 7)
//		dheader[--pos] = 128 | (--ofs & 127);
//
func ReadVariableWidthInt(r io.Reader) (int64, error) {
var c byte
if err := Read(r, &c); err != nil {
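
The hunk truncates here; for reference, a self-contained sketch of the decode that the C encoder above implies (names are illustrative, not go-git's internals — note the off-by-one bias that `--ofs` introduces on each continuation byte):

	package main

	import (
		"bufio"
		"bytes"
		"fmt"
		"io"
	)

	// decodeOffset reads base-128 groups, most significant first; the high bit
	// of each byte marks that another group follows.
	func decodeOffset(r io.ByteReader) (int64, error) {
		b, err := r.ReadByte()
		if err != nil {
			return 0, err
		}
		v := int64(b & 0x7f)
		for b&0x80 != 0 {
			if b, err = r.ReadByte(); err != nil {
				return 0, err
			}
			v = ((v + 1) << 7) | int64(b&0x7f) // undo the --ofs bias
		}
		return v, nil
	}

	func main() {
		// 0x80, 0x00 is how the C loop above encodes offset 128.
		v, err := decodeOffset(bufio.NewReader(bytes.NewReader([]byte{0x80, 0x00})))
		fmt.Println(v, err) // 128 <nil>
	}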

View File

@ -42,9 +42,8 @@ func New(n noder.Noder) (*Frame, error) {
// separated by commas.
//
// Examples:
//
//	[]
//	["a", "b"]
func (f *Frame) String() string {
var buf bytes.Buffer
_ = buf.WriteByte('[')

View File

@ -17,14 +17,15 @@ import (
// This is the kind of traversal you will expect when listing ordinary
// files and directories recursively, for example:
//
//	     Trie         Traversal order
//	     ----         ---------------
//	      .
//	    / | \              c
//	   /  |  \             d/
//	  d   c   z   ===>     d/a
//	 / \                   d/b
//	b   a                  z
//
// This iterator is somewhat special as you can choose to skip whole
// "directories" when iterating:

View File

@ -53,7 +53,7 @@ type Noder interface {
// implement NumChildren in O(1) while Children is usually more
// complex.
NumChildren() (int, error)
Skip() bool
}
// NoChildren represents the children of a noder without children.

View File

@ -1,4 +1,3 @@
//go:build darwin || freebsd || netbsd
// +build darwin freebsd netbsd
package git

View File

@ -1,4 +1,3 @@
//go:build js
// +build js
package git

View File

@ -1,4 +1,3 @@
//go:build linux
// +build linux
package git

View File

@ -1,4 +1,3 @@
//go:build openbsd || dragonfly || solaris
// +build openbsd dragonfly solaris
package git

View File

@ -1,4 +1,3 @@
//go:build windows
// +build windows
package git

View File

@ -39,35 +39,35 @@ for a protocol buffer variable v:
- Names are turned from camel_case to CamelCase for export.
- There are no methods on v to set fields; just treat
  them as structure fields.
- There are getters that return a field's value if set,
  and return the field's default value if unset.
  The getters work even if the receiver is a nil message.
- The zero value for a struct is its correct initialization state.
  All desired fields must be set before marshaling.
- A Reset() method will restore a protobuf struct to its zero state.
- Non-repeated fields are pointers to the values; nil means unset.
  That is, optional or required field int32 f becomes F *int32.
- Repeated fields are slices.
- Helper functions are available to aid the setting of fields.
  msg.Foo = proto.String("hello") // set field
- Constants are defined to hold the default values of all fields that
  have them. They have the form Default_StructName_FieldName.
  Because the getter methods handle defaulted values,
  direct use of these constants should be rare.
- Enums are given type names and maps from names to values.
  Enum values are prefixed by the enclosing message's name, or by the
  enum's type name if it is a top-level enum. Enum types have a String
  method, and an Enum method to assist in message construction.
- Nested messages, groups and enums have type names prefixed with the name of
  the surrounding message type.
- Extensions are given descriptor names that start with E_,
  followed by an underscore-delimited list of the nested messages
  that contain it (if any) followed by the CamelCased name of the
  extension field itself. HasExtension, ClearExtension, GetExtension
  and SetExtension are functions for manipulating extensions.
- Oneof field sets are given a single field in their message,
  with distinguished wrapper types for each possible field value.
- Marshal and Unmarshal are functions to encode and decode the wire format.
When the .proto file specifies `syntax="proto3"`, there are some differences:
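
To make the pointer-field and nil-safe getter conventions listed above concrete, here is a hypothetical hand-written equivalent of a generated message (type and field names invented for illustration):

	package main

	import "fmt"

	type Profile struct {
		Name *string // corresponds to: optional string name = 1;
	}

	// GetName behaves like a generated getter: safe on a nil receiver,
	// returning the field's default value when unset.
	func (p *Profile) GetName() string {
		if p != nil && p.Name != nil {
			return *p.Name
		}
		return ""
	}

	func main() {
		var p *Profile
		fmt.Println(p.GetName() == "") // true: works even on a nil message
		name := "ada"
		p = &Profile{Name: &name}
		fmt.Println(p.GetName()) // ada
	}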

View File

@ -29,7 +29,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//go:build purego || appengine || js
// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.

View File

@ -26,7 +26,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//go:build purego || appengine || js
// +build purego appengine js
// This file contains an implementation of proto field accesses using package reflect.

View File

@ -29,7 +29,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//go:build !purego && !appengine && !js
// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.

View File

@ -26,7 +26,6 @@
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//go:build !purego && !appengine && !js
// +build !purego,!appengine,!js
// This file contains the implementation of the proto field accesses using package unsafe.

View File

@ -2004,14 +2004,12 @@ func makeUnmarshalMap(f *reflect.StructField) unmarshaler {
// makeUnmarshalOneof makes an unmarshaler for oneof fields.
// for:
//
//	message Msg {
//	  oneof F {
//	    int64 X = 1;
//	    float64 Y = 2;
//	  }
//	}
//
// typ is the type of the concrete entry for a oneof case (e.g. Msg_X).
// ityp is the interface type of the oneof field (e.g. isMsg_F).
// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64).
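
For the Msg example above, the generated wrapper types look roughly like this (a sketch following protoc-gen-go's conventions; exact generated names may differ):

	package main

	import "fmt"

	type Msg struct {
		F isMsg_F // exactly one wrapper type is stored at a time
	}

	// The interface type of the oneof field (the "ityp" above).
	type isMsg_F interface{ isMsg_F() }

	// The concrete entry for each oneof case (the "typ" above).
	type Msg_X struct{ X int64 }
	type Msg_Y struct{ Y float64 }

	func (*Msg_X) isMsg_F() {}
	func (*Msg_Y) isMsg_F() {}

	func main() {
		m := Msg{F: &Msg_X{X: 42}}
		switch v := m.F.(type) {
		case *Msg_X:
			fmt.Println("int64 case:", v.X)
		case *Msg_Y:
			fmt.Println("float64 case:", v.Y)
		}
	}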

View File

@ -44,9 +44,9 @@ func (ps *pointerReferences) PushPair(vx, vy reflect.Value, d diffMode, deref bo
case diffUnknown, diffIdentical:
pp = [2]value.Pointer{value.PointerOf(vx), value.PointerOf(vy)}
case diffRemoved:
pp = [2]value.Pointer{value.PointerOf(vx), {}}
pp = [2]value.Pointer{value.PointerOf(vx), value.Pointer{}}
case diffInserted:
pp = [2]value.Pointer{{}, value.PointerOf(vy)}
pp = [2]value.Pointer{value.Pointer{}, value.PointerOf(vy)}
}
*ps = append(*ps, pp)
return pp

View File

@ -20,21 +20,22 @@ shell-style rules for quoting and commenting.
The basic use case uses the default ASCII lexer to split a string into sub-strings:
shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
shlex.Split("one \"two three\" four") -> []string{"one", "two three", "four"}
To process a stream of strings:
l := NewLexer(os.Stdin)
for ; token, err := l.Next(); err != nil {
// process token
}
l := NewLexer(os.Stdin)
for ; token, err := l.Next(); err != nil {
// process token
}
To access the raw token stream (which includes tokens for comments):
t := NewTokenizer(os.Stdin)
for ; token, err := t.Next(); err != nil {
// process token
}
t := NewTokenizer(os.Stdin)
for ; token, err := t.Next(); err != nil {
// process token
}
*/
package shlex

View File

@ -42,7 +42,7 @@ func NewDCESecurity(domain Domain, id uint32) (UUID, error) {
// NewDCEPerson returns a DCE Security (Version 2) UUID in the person
// domain with the id returned by os.Getuid.
//
//	NewDCESecurity(Person, uint32(os.Getuid()))
func NewDCEPerson() (UUID, error) {
return NewDCESecurity(Person, uint32(os.Getuid()))
}
@ -50,7 +50,7 @@ func NewDCEPerson() (UUID, error) {
// NewDCEGroup returns a DCE Security (Version 2) UUID in the group
// domain with the id returned by os.Getgid.
//
//	NewDCESecurity(Group, uint32(os.Getgid()))
func NewDCEGroup() (UUID, error) {
return NewDCESecurity(Group, uint32(os.Getgid()))
}

View File

@ -45,7 +45,7 @@ func NewHash(h hash.Hash, space UUID, data []byte, version int) UUID {
// NewMD5 returns a new MD5 (Version 3) UUID based on the
// supplied name space and data. It is the same as calling:
//
//	NewHash(md5.New(), space, data, 3)
func NewMD5(space UUID, data []byte) UUID {
return NewHash(md5.New(), space, data, 3)
}
@ -53,7 +53,7 @@ func NewMD5(space UUID, data []byte) UUID {
// NewSHA1 returns a new SHA1 (Version 5) UUID based on the
// supplied name space and data. It is the same as calling:
//
//	NewHash(sha1.New(), space, data, 5)
func NewSHA1(space UUID, data []byte) UUID {
return NewHash(sha1.New(), space, data, 5)
}

View File

@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build js
// +build js
package uuid

View File

@ -2,7 +2,6 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !js
// +build !js
package uuid

View File

@ -17,14 +17,15 @@ var jsonNull = []byte("null")
// NullUUID implements the SQL driver.Scanner interface so
// it can be used as a scan destination:
//
//	var u uuid.NullUUID
//	err := db.QueryRow("SELECT name FROM foo WHERE id=?", id).Scan(&u)
//	...
//	if u.Valid {
//		// use u.UUID
//	} else {
//		// NULL value
//	}
//
type NullUUID struct {
UUID UUID
Valid bool // Valid is true if UUID is not NULL

View File

@ -187,12 +187,10 @@ func Must(uuid UUID, err error) UUID {
}
// Validate returns an error if s is not a properly formatted UUID in one of the following formats:
//
//	xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
//	urn:uuid:xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx
//	xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
//	{xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx}
//
// It returns an error if the format is invalid, otherwise nil.
func Validate(s string) error {
switch len(s) {
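
A quick usage sketch of the accepted formats (sample values are illustrative):

	package main

	import (
		"fmt"

		"github.com/google/uuid"
	)

	func main() {
		for _, s := range []string{
			"7d444840-9dc0-11d1-b245-5ffdce74fad2",
			"urn:uuid:7d444840-9dc0-11d1-b245-5ffdce74fad2",
			"7d4448409dc011d1b2455ffdce74fad2",
			"{7d444840-9dc0-11d1-b245-5ffdce74fad2}",
		} {
			fmt.Println(uuid.Validate(s)) // <nil> for each well-formed input
		}
	}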

View File

@ -9,7 +9,7 @@ import "io"
// New creates a new random UUID or panics. New is equivalent to
// the expression
//
//	uuid.Must(uuid.NewRandom())
func New() UUID {
return Must(NewRandom())
}
@ -17,7 +17,7 @@ func New() UUID {
// NewString creates a new random UUID and returns it as a string or panics.
// NewString is equivalent to the expression
//
//	uuid.New().String()
func NewString() string {
return Must(NewRandom()).String()
}
@ -31,11 +31,11 @@ func NewString() string {
//
// A note about uniqueness derived from the UUID Wikipedia entry:
//
//	Randomly generated UUIDs have 122 random bits. One's annual risk of being
//	hit by a meteorite is estimated to be one chance in 17 billion, that
//	means the probability is about 0.00000000006 (6 × 10⁻¹¹),
//	equivalent to the odds of creating a few tens of trillions of UUIDs in a
//	year and having one duplicate.
func NewRandom() (UUID, error) {
if !poolEnabled {
return NewRandomFromReader(rander)

View File

@ -16,4 +16,5 @@
// connecting to the same hosts repeatedly from the same client, you can use
// DefaultPooledClient to receive a client that has connection pooling
// semantics similar to http.DefaultClient.
//
package cleanhttp

View File

@ -23,7 +23,7 @@ var (
// Split splits a string according to /bin/sh's word-splitting rules. It
// supports backslash-escapes, single-quotes, and double-quotes. Notably it does
// not support the $ style of quoting. It also doesn't attempt to perform any
// not support the $'' style of quoting. It also doesn't attempt to perform any
// other sort of expansion, including brace expansion, shell expansion, or
// pathname expansion.
//

View File

@ -8,7 +8,7 @@
// the host name to match on ("example.com"), and the second argument is the key
// you want to retrieve ("Port"). The keywords are case insensitive.
//
//	port := ssh_config.Get("myhost", "Port")
//
// You can also manipulate an SSH config file and then print it or write it back
// to disk.
@ -792,7 +792,7 @@ func init() {
func newConfig() *Config {
return &Config{
Hosts: []*Host{
{
&Host{
implicit: true,
Patterns: []*Pattern{matchAll},
Nodes: make([]Node, 0),

View File

@ -11,8 +11,7 @@ import "math"
// comparing to the test values, this modified white reference is used internally.
//
// See this GitHub thread for details on these values:
//
// https://github.com/hsluv/hsluv/issues/79
var hSLuvD65 = [3]float64{0.95045592705167, 1.0, 1.089057750759878}
func LuvLChToHSLuv(l, c, h float64) (float64, float64, float64) {

View File

@ -42,8 +42,7 @@ func IsTerminal(fd uintptr) bool {
// Check pipe name is used for cygwin/msys2 pty.
// Cygwin/MSYS2 PTY has a name like:
//
//	\{cygwin,msys}-XXXXXXXXXXXXXXXX-ptyN-{from,to}-master
func isCygwinPipeName(name string) bool {
token := strings.Split(name, "-")
if len(token) < 5 {

View File

@ -3,8 +3,8 @@ Package ansi is a small, fast library to create ANSI colored strings and codes.
Installation
	# this installs the color viewer and the package
	go get -u github.com/mgutz/ansi/cmd/ansi-mgutz
Example

View File

@ -50,12 +50,13 @@ type GCMParams struct {
//
// Encrypt/Decrypt. As an example:
//
//	gcmParams := pkcs11.NewGCMParams(make([]byte, 12), nil, 128)
//	p.ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_GCM, gcmParams)},
//		aesObjHandle)
//	ct, _ := p.ctx.Encrypt(session, pt)
//	iv := gcmParams.IV()
//	gcmParams.Free()
//
func NewGCMParams(iv, aad []byte, tagSize int) *GCMParams {
return &GCMParams{
iv: iv,

View File

@ -30,7 +30,7 @@ import (
//
// The following is an example of the contents of Digest types:
//
//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// This allows to abstract the digest behind this type and work only in those
// terms.

View File

@ -19,16 +19,16 @@
// More importantly, it provides tools and wrappers to work with
// hash.Hash-based digests with little effort.
//
// # Basics
// Basics
//
// The format of a digest is simply a string with two parts, dubbed the
// "algorithm" and the "digest", separated by a colon:
//
//	<algorithm>:<digest>
//
// An example of a sha256 digest representation follows:
//
//	sha256:7173b809ca12ec5dee4506cd86be934c4596dd234ee82c0662eac04a8c2c71dc
//
// The "algorithm" portion defines both the hashing algorithm used to calculate
// the digest and the encoding of the resulting digest, which defaults to "hex"
@ -42,7 +42,7 @@
// obtained, comparisons are cheap, quick and simple to express with the
// standard equality operator.
//
// # Verification
// Verification
//
// The main benefit of using the Digest type is simple verification against a
// given digest. The Verifier interface, modeled after the stdlib hash.Hash
@ -50,7 +50,7 @@
// writing is complete, calling the Verifier.Verified method will indicate
// whether or not the stream of bytes matches the target digest.
//
// # Missing Features
// Missing Features
//
// In addition to the above, we intend to add the following features to this
// package:
@ -58,4 +58,5 @@
// 1. A Digester type that supports write sink digest calculation.
//
// 2. Suspend and resume of ongoing digest calculations to support efficient digest verification in the registry.
//
package digest
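
A minimal sketch of the compute-then-verify flow described above, using the package's documented API (FromString, Digest.Verifier):

	package main

	import (
		"fmt"

		"github.com/opencontainers/go-digest"
	)

	func main() {
		d := digest.FromString("hello world") // sha256:<hex> by default
		v := d.Verifier()
		v.Write([]byte("hello world")) // stream the content into the verifier
		fmt.Println(v.Verified())      // true: content matches the digest
	}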

View File

@ -2,89 +2,89 @@
//
// The traditional error handling idiom in Go is roughly akin to
//
//	if err != nil {
//		return err
//	}
//
// which when applied recursively up the call stack results in error reports
// without context or debugging information. The errors package allows
// programmers to add context to the failure path in their code in a way
// that does not destroy the original value of the error.
//
// # Adding context to an error
// Adding context to an error
//
// The errors.Wrap function returns a new error that adds context to the
// original error by recording a stack trace at the point Wrap is called,
// together with the supplied message. For example
//
//	_, err := ioutil.ReadAll(r)
//	if err != nil {
//		return errors.Wrap(err, "read failed")
//	}
//
// If additional control is required, the errors.WithStack and
// errors.WithMessage functions destructure errors.Wrap into its component
// operations: annotating an error with a stack trace and with a message,
// respectively.
//
// # Retrieving the cause of an error
// Retrieving the cause of an error
//
// Using errors.Wrap constructs a stack of errors, adding context to the
// preceding error. Depending on the nature of the error it may be necessary
// to reverse the operation of errors.Wrap to retrieve the original error
// for inspection. Any error value which implements this interface
//
//	type causer interface {
//		Cause() error
//	}
//
// can be inspected by errors.Cause. errors.Cause will recursively retrieve
// the topmost error that does not implement causer, which is assumed to be
// the original cause. For example:
//
//	switch err := errors.Cause(err).(type) {
//	case *MyError:
//		// handle specifically
//	default:
//		// unknown error
//	}
//
// Although the causer interface is not exported by this package, it is
// considered a part of its stable public interface.
//
// # Formatted printing of errors
// Formatted printing of errors
//
// All error values returned from this package implement fmt.Formatter and can
// be formatted by the fmt package. The following verbs are supported:
//
//	%s    print the error. If the error has a Cause it will be
//	      printed recursively.
//	%v    see %s
//	%+v   extended format. Each Frame of the error's StackTrace will
//	      be printed in detail.
//
// # Retrieving the stack trace of an error or wrapper
// Retrieving the stack trace of an error or wrapper
//
// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are
// invoked. This information can be retrieved with the following interface:
//
//	type stackTracer interface {
//		StackTrace() errors.StackTrace
//	}
//
// The returned errors.StackTrace type is defined as
//
//	type StackTrace []Frame
//
// The Frame type represents a call site in the stack trace. Frame supports
// the fmt.Formatter interface that can be used for printing information about
// the stack trace of this error. For example:
//
//	if err, ok := err.(stackTracer); ok {
//		for _, f := range err.StackTrace() {
//			fmt.Printf("%+s:%d\n", f, f)
//		}
//	}
//
// Although the stackTracer interface is not exported by this package, it is
// considered a part of its stable public interface.
@ -265,9 +265,9 @@ func (w *withMessage) Format(s fmt.State, verb rune) {
// An error value has a cause if it implements the following
// interface:
//
//	type causer interface {
//		Cause() error
//	}
//
// If the error does not implement Cause, the original error will
// be returned. If the error is nil, nil will be returned without further

View File

@ -1,4 +1,3 @@
//go:build go1.13
// +build go1.13
package errors

View File

@ -51,16 +51,16 @@ func (f Frame) name() string {
// Format formats the frame according to the fmt.Formatter interface.
//
//	%s    source file
//	%d    source line
//	%n    function name
//	%v    equivalent to %s:%d
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//	%+s   function name and path of source file relative to the compile time
//	      GOPATH separated by \n\t (<funcname>\n\t<path>)
//	%+v   equivalent to %+s:%d
func (f Frame) Format(s fmt.State, verb rune) {
switch verb {
case 's':
@ -98,12 +98,12 @@ type StackTrace []Frame
// Format formats the stack of Frames according to the fmt.Formatter interface.
//
//	%s    lists source files for each Frame in the stack
//	%v    lists the source file and line number for each Frame in the stack
//
// Format accepts flags that alter the printing of some verbs, as follows:
//
//	%+v   Prints filename, function, and line number for each Frame in the stack.
func (st StackTrace) Format(s fmt.State, verb rune) {
switch verb {
case 'v':
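
A small runnable sketch of the verbs described above (output shapes abbreviated in the comments):

	package main

	import (
		"fmt"

		"github.com/pkg/errors"
	)

	func main() {
		err := errors.Wrap(errors.New("boom"), "read failed")
		fmt.Printf("%s\n", err)  // read failed: boom
		fmt.Printf("%v\n", err)  // same as %s
		fmt.Printf("%+v\n", err) // message plus file:line for each stack Frame
	}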

View File

@ -161,12 +161,12 @@ func (m *SequenceMatcher) chainB() {
m.bJunk = map[string]struct{}{}
if m.IsJunk != nil {
junk := m.bJunk
for s := range b2j {
for s, _ := range b2j {
if m.IsJunk(s) {
junk[s] = struct{}{}
}
}
for s := range junk {
for s, _ := range junk {
delete(b2j, s)
}
}
@ -181,7 +181,7 @@ func (m *SequenceMatcher) chainB() {
popular[s] = struct{}{}
}
}
for s := range popular {
for s, _ := range popular {
delete(b2j, s)
}
}
@ -199,15 +199,12 @@ func (m *SequenceMatcher) isBJunk(s string) bool {
// If IsJunk is not defined:
//
// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where
//
//	alo <= i <= i+k <= ahi
//	blo <= j <= j+k <= bhi
//
// and for all (i',j',k') meeting those conditions,
//
//	k >= k'
//	i <= i'
//	and if i == i', j <= j'
//
// In other words, of all maximal matching blocks, return one that
// starts earliest in a, and of all those maximal matching blocks that
@ -419,7 +416,7 @@ func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode {
}
codes := m.GetOpCodes()
if len(codes) == 0 {
codes = []OpCode{{'e', 0, 1, 0, 1}}
codes = []OpCode{OpCode{'e', 0, 1, 0, 1}}
}
// Fixup leading and trailing groups if they show no changes.
if codes[0].Tag == 'e' {

View File

@ -1,8 +0,0 @@
*.out
*.swp
*.8
*.6
_obj
_test*
markdown
tags

View File

@ -1,17 +0,0 @@
sudo: false
language: go
go:
- "1.10.x"
- "1.11.x"
- tip
matrix:
fast_finish: true
allow_failures:
- go: tip
install:
- # Do nothing. This is needed to prevent default install action "go get -t -v ./..." from happening here (we want it to happen inside script step).
script:
- go get -t -v ./...
- diff -u <(echo -n) <(gofmt -d -s .)
- go tool vet .
- go test -v ./...

Some files were not shown because too many files have changed in this diff.