forked from toolshed/abra
chore: go mod vendor / tidy
3 vendor/github.com/go-git/go-git/v5/COMPATIBILITY.md (generated, vendored)
@@ -11,7 +11,7 @@ compatibility status with go-git.
| `init` | `--bare` | ✅ | | |
| `init` | `--template` <br/> `--separate-git-dir` <br/> `--shared` | ❌ | | |
| `clone` | | ✅ | | - [PlainClone](_examples/clone/main.go) |
| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh](_examples/clone/auth/ssh/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
| `clone` | Authentication: <br/> - none <br/> - access token <br/> - username + password <br/> - ssh | ✅ | | - [clone ssh (private_key)](_examples/clone/auth/ssh/private_key/main.go) <br/> - [clone ssh (ssh_agent)](_examples/clone/auth/ssh/ssh_agent/main.go) <br/> - [clone access token](_examples/clone/auth/basic/access_token/main.go) <br/> - [clone user + password](_examples/clone/auth/basic/username_password/main.go) |
| `clone` | `--progress` <br/> `--single-branch` <br/> `--depth` <br/> `--origin` <br/> `--recurse-submodules` <br/>`--shared` | ✅ | | - [recurse submodules](_examples/clone/main.go) <br/> - [progress](_examples/progress/main.go) |

## Basic snapshotting
@@ -34,6 +34,7 @@ compatibility status with go-git.
| `merge` | | ⚠️ (partial) | Fast-forward only | |
| `mergetool` | | ❌ | | |
| `stash` | | ❌ | | |
| `sparse-checkout` | | ✅ | | - [sparse-checkout](_examples/sparse-checkout/main.go) |
| `tag` | | ✅ | | - [tag](_examples/tag/main.go) <br/> - [tag create and push](_examples/tag-create-push/main.go) |

## Sharing and updating projects
7 vendor/github.com/go-git/go-git/v5/CONTRIBUTING.md (generated, vendored)
@@ -31,6 +31,13 @@ In order for a PR to be accepted it needs to pass a list of requirements:
- If the PR is a new feature, it has to come with a suite of unit tests, that tests the new functionality.
- In any case, all the PRs have to pass the personal evaluation of at least one of the maintainers of go-git.

### Branches

The `master` branch is currently used for maintaining the `v5` major release only. The accepted changes would
be dependency bumps, bug fixes and small changes that aren't needed for `v6`. New development should target the
`v6-exp` branch, and if agreed with at least one go-git maintainer, it can be back ported to `v5` by creating
a new PR that targets `master`.

### Format of the commit message

Every commit message should describe what was changed, under which context and, if applicable, the GitHub issue it relates to:
13 vendor/github.com/go-git/go-git/v5/blame.go (generated, vendored)
@ -97,13 +97,10 @@ func Blame(c *object.Commit, path string) (*BlameResult, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if finished == true {
|
||||
if finished {
|
||||
break
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
b.lineToCommit = make([]*object.Commit, finalLength)
|
||||
for i := range needsMap {
|
||||
@ -309,8 +306,8 @@ func (b *blame) addBlames(curItems []*queueItem) (bool, error) {
|
||||
for h := range hunks {
|
||||
hLines := countLines(hunks[h].Text)
|
||||
for hl := 0; hl < hLines; hl++ {
|
||||
switch {
|
||||
case hunks[h].Type == diffmatchpatch.DiffEqual:
|
||||
switch hunks[h].Type {
|
||||
case diffmatchpatch.DiffEqual:
|
||||
prevl++
|
||||
curl++
|
||||
if curl == curItem.NeedsMap[need].Cur {
|
||||
@ -322,7 +319,7 @@ func (b *blame) addBlames(curItems []*queueItem) (bool, error) {
|
||||
break out
|
||||
}
|
||||
}
|
||||
case hunks[h].Type == diffmatchpatch.DiffInsert:
|
||||
case diffmatchpatch.DiffInsert:
|
||||
curl++
|
||||
if curl == curItem.NeedsMap[need].Cur {
|
||||
// the line we want is added, it may have been added here (or by another parent), skip it for now
|
||||
@ -331,7 +328,7 @@ func (b *blame) addBlames(curItems []*queueItem) (bool, error) {
|
||||
break out
|
||||
}
|
||||
}
|
||||
case hunks[h].Type == diffmatchpatch.DiffDelete:
|
||||
case diffmatchpatch.DiffDelete:
|
||||
prevl += hLines
|
||||
continue out
|
||||
default:
|
||||
|
2 vendor/github.com/go-git/go-git/v5/config/config.go (generated, vendored)
@@ -252,6 +252,7 @@ const (
extensionsSection = "extensions"
fetchKey = "fetch"
urlKey = "url"
pushurlKey = "pushurl"
bareKey = "bare"
worktreeKey = "worktree"
commentCharKey = "commentChar"
@@ -633,6 +634,7 @@ func (c *RemoteConfig) unmarshal(s *format.Subsection) error {

c.Name = c.raw.Name
c.URLs = append([]string(nil), c.raw.Options.GetAll(urlKey)...)
c.URLs = append(c.URLs, c.raw.Options.GetAll(pushurlKey)...)
c.Fetch = fetch
c.Mirror = c.raw.Options.Get(mirrorKey) == "true"
7 vendor/github.com/go-git/go-git/v5/internal/revision/scanner.go (generated, vendored)
@@ -43,6 +43,11 @@ func tokenizeExpression(ch rune, tokenType token, check runeCategoryValidator, r
return tokenType, string(data), nil
}

// maxRevisionLength holds the maximum length that will be parsed for a
// revision. Git itself doesn't enforce a max length, but rather leans on
// the OS to enforce it via its ARG_MAX.
const maxRevisionLength = 128 * 1024 // 128kb

var zeroRune = rune(0)

// scanner represents a lexical scanner.
@@ -52,7 +57,7 @@ type scanner struct {

// newScanner returns a new instance of scanner.
func newScanner(r io.Reader) *scanner {
return &scanner{r: bufio.NewReader(r)}
return &scanner{r: bufio.NewReader(io.LimitReader(r, maxRevisionLength))}
}

// Scan extracts tokens and their strings counterpart
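The hunk above caps how much input the revision scanner will ever consume by wrapping the reader in io.LimitReader. A minimal standalone sketch of that capping behaviour (the 16-byte limit is illustrative; the vendored code uses 128 * 1024):

```go
package main

import (
	"fmt"
	"io"
	"strings"
)

func main() {
	const maxRevisionLength = 16 // illustrative; the diff above uses 128 * 1024

	// Everything past the limit is simply never read, so an absurdly long
	// revision expression cannot make the scanner buffer unbounded input.
	r := io.LimitReader(strings.NewReader("HEAD~1:some/very/long/path"), maxRevisionLength)
	data, _ := io.ReadAll(r)
	fmt.Printf("%q\n", data) // "HEAD~1:some/very"
}
```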
26 vendor/github.com/go-git/go-git/v5/options.go (generated, vendored)
@@ -416,6 +416,9 @@ type ResetOptions struct {
// the index (resetting it to the tree of Commit) and the working tree
// depending on Mode. If empty MixedReset is used.
Mode ResetMode
// Files, if not empty will constrain the reseting the index to only files
// specified in this list.
Files []string
}

// Validate validates the fields and sets the default values.
@@ -790,3 +793,26 @@ type PlainInitOptions struct {

// Validate validates the fields and sets the default values.
func (o *PlainInitOptions) Validate() error { return nil }

var (
ErrNoRestorePaths = errors.New("you must specify path(s) to restore")
)

// RestoreOptions describes how a restore should be performed.
type RestoreOptions struct {
// Marks to restore the content in the index
Staged bool
// Marks to restore the content of the working tree
Worktree bool
// List of file paths that will be restored
Files []string
}

// Validate validates the fields and sets the default values.
func (o *RestoreOptions) Validate() error {
if len(o.Files) == 0 {
return ErrNoRestorePaths
}

return nil
}
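ResetOptions gains a Files field above, which scopes the reset to the listed paths. A hedged usage sketch (repository path and file name are placeholders):

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/tmp/example-repo") // placeholder path
	if err != nil {
		panic(err)
	}

	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Mixed reset limited to one file: unstages README.md but leaves the
	// rest of the index and the working tree untouched.
	err = wt.Reset(&git.ResetOptions{
		Mode:  git.MixedReset,
		Files: []string{"README.md"},
	})
	fmt.Println(err)
}
```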
4 vendor/github.com/go-git/go-git/v5/plumbing/format/gitignore/dir.go (generated, vendored)
@@ -64,6 +64,10 @@ func ReadPatterns(fs billy.Filesystem, path []string) (ps []Pattern, err error)

for _, fi := range fis {
if fi.IsDir() && fi.Name() != gitDir {
if NewMatcher(ps).Match(append(path, fi.Name()), true) {
continue
}

var subps []Pattern
subps, err = ReadPatterns(fs, append(path, fi.Name()))
if err != nil {
115 vendor/github.com/go-git/go-git/v5/plumbing/format/index/decoder.go (generated, vendored)
@ -24,8 +24,8 @@ var (
|
||||
// ErrInvalidChecksum is returned by Decode if the SHA1 hash mismatch with
|
||||
// the read content
|
||||
ErrInvalidChecksum = errors.New("invalid checksum")
|
||||
|
||||
errUnknownExtension = errors.New("unknown extension")
|
||||
// ErrUnknownExtension is returned when an index extension is encountered that is considered mandatory
|
||||
ErrUnknownExtension = errors.New("unknown extension")
|
||||
)
|
||||
|
||||
const (
|
||||
@ -39,6 +39,7 @@ const (
|
||||
|
||||
// A Decoder reads and decodes index files from an input stream.
|
||||
type Decoder struct {
|
||||
buf *bufio.Reader
|
||||
r io.Reader
|
||||
hash hash.Hash
|
||||
lastEntry *Entry
|
||||
@ -49,8 +50,10 @@ type Decoder struct {
|
||||
// NewDecoder returns a new decoder that reads from r.
|
||||
func NewDecoder(r io.Reader) *Decoder {
|
||||
h := hash.New(hash.CryptoType)
|
||||
buf := bufio.NewReader(r)
|
||||
return &Decoder{
|
||||
r: io.TeeReader(r, h),
|
||||
buf: buf,
|
||||
r: io.TeeReader(buf, h),
|
||||
hash: h,
|
||||
extReader: bufio.NewReader(nil),
|
||||
}
|
||||
@ -210,71 +213,75 @@ func (d *Decoder) readExtensions(idx *Index) error {
|
||||
// count that they are not supported by jgit or libgit
|
||||
|
||||
var expected []byte
|
||||
var peeked []byte
|
||||
var err error
|
||||
|
||||
var header [4]byte
|
||||
// we should always be able to peek for 4 bytes (header) + 4 bytes (extlen) + final hash
|
||||
// if this fails, we know that we're at the end of the index
|
||||
peekLen := 4 + 4 + d.hash.Size()
|
||||
|
||||
for {
|
||||
expected = d.hash.Sum(nil)
|
||||
|
||||
var n int
|
||||
if n, err = io.ReadFull(d.r, header[:]); err != nil {
|
||||
if n == 0 {
|
||||
err = io.EOF
|
||||
}
|
||||
|
||||
peeked, err = d.buf.Peek(peekLen)
|
||||
if len(peeked) < peekLen {
|
||||
// there can't be an extension at this point, so let's bail out
|
||||
break
|
||||
}
|
||||
|
||||
err = d.readExtension(idx, header[:])
|
||||
if err != nil {
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if err != errUnknownExtension {
|
||||
return err
|
||||
}
|
||||
|
||||
return d.readChecksum(expected, header)
|
||||
}
|
||||
|
||||
func (d *Decoder) readExtension(idx *Index, header []byte) error {
|
||||
switch {
|
||||
case bytes.Equal(header, treeExtSignature):
|
||||
r, err := d.getExtensionReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = d.readExtension(idx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return d.readChecksum(expected)
|
||||
}
|
||||
|
||||
func (d *Decoder) readExtension(idx *Index) error {
|
||||
var header [4]byte
|
||||
|
||||
if _, err := io.ReadFull(d.r, header[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
r, err := d.getExtensionReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
switch {
|
||||
case bytes.Equal(header[:], treeExtSignature):
|
||||
idx.Cache = &Tree{}
|
||||
d := &treeExtensionDecoder{r}
|
||||
if err := d.Decode(idx.Cache); err != nil {
|
||||
return err
|
||||
}
|
||||
case bytes.Equal(header, resolveUndoExtSignature):
|
||||
r, err := d.getExtensionReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case bytes.Equal(header[:], resolveUndoExtSignature):
|
||||
idx.ResolveUndo = &ResolveUndo{}
|
||||
d := &resolveUndoDecoder{r}
|
||||
if err := d.Decode(idx.ResolveUndo); err != nil {
|
||||
return err
|
||||
}
|
||||
case bytes.Equal(header, endOfIndexEntryExtSignature):
|
||||
r, err := d.getExtensionReader()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
case bytes.Equal(header[:], endOfIndexEntryExtSignature):
|
||||
idx.EndOfIndexEntry = &EndOfIndexEntry{}
|
||||
d := &endOfIndexEntryDecoder{r}
|
||||
if err := d.Decode(idx.EndOfIndexEntry); err != nil {
|
||||
return err
|
||||
}
|
||||
default:
|
||||
return errUnknownExtension
|
||||
// See https://git-scm.com/docs/index-format, which says:
|
||||
// If the first byte is 'A'..'Z' the extension is optional and can be ignored.
|
||||
if header[0] < 'A' || header[0] > 'Z' {
|
||||
return ErrUnknownExtension
|
||||
}
|
||||
|
||||
d := &unknownExtensionDecoder{r}
|
||||
if err := d.Decode(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -290,11 +297,10 @@ func (d *Decoder) getExtensionReader() (*bufio.Reader, error) {
|
||||
return d.extReader, nil
|
||||
}
|
||||
|
||||
func (d *Decoder) readChecksum(expected []byte, alreadyRead [4]byte) error {
|
||||
func (d *Decoder) readChecksum(expected []byte) error {
|
||||
var h plumbing.Hash
|
||||
copy(h[:4], alreadyRead[:])
|
||||
|
||||
if _, err := io.ReadFull(d.r, h[4:]); err != nil {
|
||||
if _, err := io.ReadFull(d.r, h[:]); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@ -476,3 +482,22 @@ func (d *endOfIndexEntryDecoder) Decode(e *EndOfIndexEntry) error {
|
||||
_, err = io.ReadFull(d.r, e.Hash[:])
|
||||
return err
|
||||
}
|
||||
|
||||
type unknownExtensionDecoder struct {
|
||||
r *bufio.Reader
|
||||
}
|
||||
|
||||
func (d *unknownExtensionDecoder) Decode() error {
|
||||
var buf [1024]byte
|
||||
|
||||
for {
|
||||
_, err := d.r.Read(buf[:])
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
94 vendor/github.com/go-git/go-git/v5/plumbing/format/index/encoder.go (generated, vendored)
@ -3,8 +3,11 @@ package index
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"path"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/go-git/go-git/v5/plumbing/hash"
|
||||
@ -13,7 +16,7 @@ import (
|
||||
|
||||
var (
|
||||
// EncodeVersionSupported is the range of supported index versions
|
||||
EncodeVersionSupported uint32 = 3
|
||||
EncodeVersionSupported uint32 = 4
|
||||
|
||||
// ErrInvalidTimestamp is returned by Encode if a Index with a Entry with
|
||||
// negative timestamp values
|
||||
@ -22,20 +25,25 @@ var (
|
||||
|
||||
// An Encoder writes an Index to an output stream.
|
||||
type Encoder struct {
|
||||
w io.Writer
|
||||
hash hash.Hash
|
||||
w io.Writer
|
||||
hash hash.Hash
|
||||
lastEntry *Entry
|
||||
}
|
||||
|
||||
// NewEncoder returns a new encoder that writes to w.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
h := hash.New(hash.CryptoType)
|
||||
mw := io.MultiWriter(w, h)
|
||||
return &Encoder{mw, h}
|
||||
return &Encoder{mw, h, nil}
|
||||
}
|
||||
|
||||
// Encode writes the Index to the stream of the encoder.
|
||||
func (e *Encoder) Encode(idx *Index) error {
|
||||
// TODO: support v4
|
||||
return e.encode(idx, true)
|
||||
}
|
||||
|
||||
func (e *Encoder) encode(idx *Index, footer bool) error {
|
||||
|
||||
// TODO: support extensions
|
||||
if idx.Version > EncodeVersionSupported {
|
||||
return ErrUnsupportedVersion
|
||||
@ -49,7 +57,10 @@ func (e *Encoder) Encode(idx *Index) error {
|
||||
return err
|
||||
}
|
||||
|
||||
return e.encodeFooter()
|
||||
if footer {
|
||||
return e.encodeFooter()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeHeader(idx *Index) error {
|
||||
@ -64,7 +75,7 @@ func (e *Encoder) encodeEntries(idx *Index) error {
|
||||
sort.Sort(byName(idx.Entries))
|
||||
|
||||
for _, entry := range idx.Entries {
|
||||
if err := e.encodeEntry(entry); err != nil {
|
||||
if err := e.encodeEntry(idx, entry); err != nil {
|
||||
return err
|
||||
}
|
||||
entryLength := entryHeaderLength
|
||||
@ -73,7 +84,7 @@ func (e *Encoder) encodeEntries(idx *Index) error {
|
||||
}
|
||||
|
||||
wrote := entryLength + len(entry.Name)
|
||||
if err := e.padEntry(wrote); err != nil {
|
||||
if err := e.padEntry(idx, wrote); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -81,7 +92,7 @@ func (e *Encoder) encodeEntries(idx *Index) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeEntry(entry *Entry) error {
|
||||
func (e *Encoder) encodeEntry(idx *Index, entry *Entry) error {
|
||||
sec, nsec, err := e.timeToUint32(&entry.CreatedAt)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -132,9 +143,68 @@ func (e *Encoder) encodeEntry(entry *Entry) error {
|
||||
return err
|
||||
}
|
||||
|
||||
switch idx.Version {
|
||||
case 2, 3:
|
||||
err = e.encodeEntryName(entry)
|
||||
case 4:
|
||||
err = e.encodeEntryNameV4(entry)
|
||||
default:
|
||||
err = ErrUnsupportedVersion
|
||||
}
|
||||
|
||||
return err
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeEntryName(entry *Entry) error {
|
||||
return binary.Write(e.w, []byte(entry.Name))
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeEntryNameV4(entry *Entry) error {
|
||||
name := entry.Name
|
||||
l := 0
|
||||
if e.lastEntry != nil {
|
||||
dir := path.Dir(e.lastEntry.Name) + "/"
|
||||
if strings.HasPrefix(entry.Name, dir) {
|
||||
l = len(e.lastEntry.Name) - len(dir)
|
||||
name = strings.TrimPrefix(entry.Name, dir)
|
||||
} else {
|
||||
l = len(e.lastEntry.Name)
|
||||
}
|
||||
}
|
||||
|
||||
e.lastEntry = entry
|
||||
|
||||
err := binary.WriteVariableWidthInt(e.w, int64(l))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return binary.Write(e.w, []byte(name+string('\x00')))
|
||||
}
|
||||
|
||||
func (e *Encoder) encodeRawExtension(signature string, data []byte) error {
|
||||
if len(signature) != 4 {
|
||||
return fmt.Errorf("invalid signature length")
|
||||
}
|
||||
|
||||
_, err := e.w.Write([]byte(signature))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
err = binary.WriteUint32(e.w, uint32(len(data)))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
_, err = e.w.Write(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
|
||||
if t.IsZero() {
|
||||
return 0, 0, nil
|
||||
@ -147,7 +217,11 @@ func (e *Encoder) timeToUint32(t *time.Time) (uint32, uint32, error) {
|
||||
return uint32(t.Unix()), uint32(t.Nanosecond()), nil
|
||||
}
|
||||
|
||||
func (e *Encoder) padEntry(wrote int) error {
|
||||
func (e *Encoder) padEntry(idx *Index, wrote int) error {
|
||||
if idx.Version == 4 {
|
||||
return nil
|
||||
}
|
||||
|
||||
padLen := 8 - wrote%8
|
||||
|
||||
_, err := e.w.Write(bytes.Repeat([]byte{'\x00'}, padLen))
|
||||
|
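The index v4 entry encoding added above stores each path relative to the previous entry: a count of bytes to strip from the previous name, then the remaining suffix. A standalone sketch of that prefix-compression idea (helper name and values are illustrative, not the library's API):

```go
package main

import (
	"fmt"
	"path"
	"strings"
)

// encodeV4Name mimics the scheme from the diff: relative to the previous
// entry's directory, report how many trailing bytes of the previous name to
// strip, followed by the suffix of the new name that still has to be written.
func encodeV4Name(prev, name string) (strip int, suffix string) {
	if prev == "" {
		return 0, name
	}
	dir := path.Dir(prev) + "/"
	if strings.HasPrefix(name, dir) {
		return len(prev) - len(dir), strings.TrimPrefix(name, dir)
	}
	return len(prev), name
}

func main() {
	strip, suffix := encodeV4Name("docs/a.txt", "docs/b.txt")
	fmt.Println(strip, suffix) // 5 b.txt: drop "a.txt", keep the shared "docs/"
}
```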
20 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/delta_index.go (generated, vendored)
@ -32,19 +32,17 @@ func (idx *deltaIndex) findMatch(src, tgt []byte, tgtOffset int) (srcOffset, l i
|
||||
return 0, -1
|
||||
}
|
||||
|
||||
if len(tgt) >= tgtOffset+s && len(src) >= blksz {
|
||||
h := hashBlock(tgt, tgtOffset)
|
||||
tIdx := h & idx.mask
|
||||
eIdx := idx.table[tIdx]
|
||||
if eIdx != 0 {
|
||||
srcOffset = idx.entries[eIdx]
|
||||
} else {
|
||||
return
|
||||
}
|
||||
|
||||
l = matchLength(src, tgt, tgtOffset, srcOffset)
|
||||
h := hashBlock(tgt, tgtOffset)
|
||||
tIdx := h & idx.mask
|
||||
eIdx := idx.table[tIdx]
|
||||
if eIdx == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
srcOffset = idx.entries[eIdx]
|
||||
|
||||
l = matchLength(src, tgt, tgtOffset, srcOffset)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
21 vendor/github.com/go-git/go-git/v5/plumbing/format/packfile/patch_delta.go (generated, vendored)
@ -26,6 +26,13 @@ var (
|
||||
const (
|
||||
payload = 0x7f // 0111 1111
|
||||
continuation = 0x80 // 1000 0000
|
||||
|
||||
// maxPatchPreemptionSize defines what is the max size of bytes to be
|
||||
// premptively made available for a patch operation.
|
||||
maxPatchPreemptionSize uint = 65536
|
||||
|
||||
// minDeltaSize defines the smallest size for a delta.
|
||||
minDeltaSize = 4
|
||||
)
|
||||
|
||||
type offset struct {
|
||||
@ -86,9 +93,13 @@ func ApplyDelta(target, base plumbing.EncodedObject, delta []byte) (err error) {
|
||||
}
|
||||
|
||||
// PatchDelta returns the result of applying the modification deltas in delta to src.
|
||||
// An error will be returned if delta is corrupted (ErrDeltaLen) or an action command
|
||||
// An error will be returned if delta is corrupted (ErrInvalidDelta) or an action command
|
||||
// is not copy from source or copy from delta (ErrDeltaCmd).
|
||||
func PatchDelta(src, delta []byte) ([]byte, error) {
|
||||
if len(src) == 0 || len(delta) < minDeltaSize {
|
||||
return nil, ErrInvalidDelta
|
||||
}
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
if err := patchDelta(b, src, delta); err != nil {
|
||||
return nil, err
|
||||
@ -239,7 +250,9 @@ func patchDelta(dst *bytes.Buffer, src, delta []byte) error {
|
||||
remainingTargetSz := targetSz
|
||||
|
||||
var cmd byte
|
||||
dst.Grow(int(targetSz))
|
||||
|
||||
growSz := min(targetSz, maxPatchPreemptionSize)
|
||||
dst.Grow(int(growSz))
|
||||
for {
|
||||
if len(delta) == 0 {
|
||||
return ErrInvalidDelta
|
||||
@ -403,6 +416,10 @@ func patchDeltaWriter(dst io.Writer, base io.ReaderAt, delta io.Reader,
|
||||
// This must be called twice on the delta data buffer, first to get the
|
||||
// expected source buffer size, and again to get the target buffer size.
|
||||
func decodeLEB128(input []byte) (uint, []byte) {
|
||||
if len(input) == 0 {
|
||||
return 0, input
|
||||
}
|
||||
|
||||
var num, sz uint
|
||||
var b byte
|
||||
for {
|
||||
|
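The delta header sizes mentioned above are little-endian base-128 varints; decodeLEB128 is called twice, once for the expected source size and once for the target size. A self-contained sketch of that decoding scheme (illustrative, mirroring the payload/continuation constants shown in the hunk):

```go
package main

import "fmt"

// decodeLEB128 reads a little-endian base-128 varint: 7 payload bits per
// byte, with the high bit marking that another byte follows.
func decodeLEB128(input []byte) (uint, []byte) {
	const (
		payload      = 0x7f // 0111 1111
		continuation = 0x80 // 1000 0000
	)
	var num, shift uint
	var i int
	for i = 0; i < len(input); i++ {
		b := input[i]
		num |= uint(b&payload) << shift
		shift += 7
		if b&continuation == 0 {
			i++
			break
		}
	}
	return num, input[i:]
}

func main() {
	// 0xe5 0x8e 0x26 encodes 624485; the trailing byte is left over.
	n, rest := decodeLEB128([]byte{0xe5, 0x8e, 0x26, 0x42})
	fmt.Println(n, rest) // 624485 [66]
}
```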
2 vendor/github.com/go-git/go-git/v5/plumbing/format/pktline/scanner.go (generated, vendored)
@@ -140,6 +140,8 @@ func asciiHexToByte(b byte) (byte, error) {
return b - '0', nil
case b >= 'a' && b <= 'f':
return b - 'a' + 10, nil
case b >= 'A' && b <= 'F':
return b - 'A' + 10, nil
default:
return 0, ErrInvalidPktLen
}
1 vendor/github.com/go-git/go-git/v5/plumbing/object/signature.go (generated, vendored)
@@ -19,6 +19,7 @@ var (
// a PKCS#7 (S/MIME) signature.
x509SignatureFormat = signatureFormat{
[]byte("-----BEGIN CERTIFICATE-----"),
[]byte("-----BEGIN SIGNED MESSAGE-----"),
}

// sshSignatureFormat is the format of an SSH signature.
1 vendor/github.com/go-git/go-git/v5/plumbing/object/tree.go (generated, vendored)
@@ -295,6 +295,7 @@ func (s TreeEntrySorter) Swap(i, j int) {
}

// Encode transforms a Tree into a plumbing.EncodedObject.
// The tree entries must be sorted by name.
func (t *Tree) Encode(o plumbing.EncodedObject) (err error) {
o.SetType(plumbing.TreeObject)
w, err := o.Writer()
76 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/filter.go (generated, vendored, new file)
@@ -0,0 +1,76 @@
package packp

import (
"errors"
"fmt"
"github.com/go-git/go-git/v5/plumbing"
"net/url"
"strings"
)

var ErrUnsupportedObjectFilterType = errors.New("unsupported object filter type")

// Filter values enable the partial clone capability which causes
// the server to omit objects that match the filter.
//
// See [Git's documentation] for more details.
//
// [Git's documentation]: https://github.com/git/git/blob/e02ecfcc534e2021aae29077a958dd11c3897e4c/Documentation/rev-list-options.txt#L948
type Filter string

type BlobLimitPrefix string

const (
BlobLimitPrefixNone BlobLimitPrefix = ""
BlobLimitPrefixKibi BlobLimitPrefix = "k"
BlobLimitPrefixMebi BlobLimitPrefix = "m"
BlobLimitPrefixGibi BlobLimitPrefix = "g"
)

// FilterBlobNone omits all blobs.
func FilterBlobNone() Filter {
return "blob:none"
}

// FilterBlobLimit omits blobs of size at least n bytes (when prefix is
// BlobLimitPrefixNone), n kibibytes (when prefix is BlobLimitPrefixKibi),
// n mebibytes (when prefix is BlobLimitPrefixMebi) or n gibibytes (when
// prefix is BlobLimitPrefixGibi). n can be zero, in which case all blobs
// will be omitted.
func FilterBlobLimit(n uint64, prefix BlobLimitPrefix) Filter {
return Filter(fmt.Sprintf("blob:limit=%d%s", n, prefix))
}

// FilterTreeDepth omits all blobs and trees whose depth from the root tree
// is larger or equal to depth.
func FilterTreeDepth(depth uint64) Filter {
return Filter(fmt.Sprintf("tree:%d", depth))
}

// FilterObjectType omits all objects which are not of the requested type t.
// Supported types are TagObject, CommitObject, TreeObject and BlobObject.
func FilterObjectType(t plumbing.ObjectType) (Filter, error) {
switch t {
case plumbing.TagObject:
fallthrough
case plumbing.CommitObject:
fallthrough
case plumbing.TreeObject:
fallthrough
case plumbing.BlobObject:
return Filter(fmt.Sprintf("object:type=%s", t.String())), nil
default:
return "", fmt.Errorf("%w: %s", ErrUnsupportedObjectFilterType, t.String())
}
}

// FilterCombine combines multiple Filter values together.
func FilterCombine(filters ...Filter) Filter {
var escapedFilters []string

for _, filter := range filters {
escapedFilters = append(escapedFilters, url.QueryEscape(string(filter)))
}

return Filter(fmt.Sprintf("combine:%s", strings.Join(escapedFilters, "+")))
}
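The constructors in this new file only build filter-spec strings; a quick sketch of how they compose (whether a given server honours a filter is outside this diff):

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
	"github.com/go-git/go-git/v5/plumbing/protocol/packp"
)

func main() {
	// "blob:none" — omit every blob (typical for partial clones).
	fmt.Println(packp.FilterBlobNone())

	// "blob:limit=1m" — omit blobs of one mebibyte or larger.
	fmt.Println(packp.FilterBlobLimit(1, packp.BlobLimitPrefixMebi))

	// Unsupported object types are rejected with ErrUnsupportedObjectFilterType.
	f, err := packp.FilterObjectType(plumbing.BlobObject)
	fmt.Println(f, err)

	// Combined filters are URL-escaped and joined with '+'.
	fmt.Println(packp.FilterCombine(packp.FilterBlobNone(), packp.FilterTreeDepth(1)))
}
```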
2 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/sideband/demux.go (generated, vendored)
@@ -114,7 +114,7 @@ func (d *Demuxer) nextPackData() ([]byte, error) {

size := len(content)
if size == 0 {
return nil, nil
return nil, io.EOF
} else if size > d.max {
return nil, ErrMaxPackedExceeded
}
3 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/srvresp.go (generated, vendored)
@@ -120,6 +120,9 @@ func (r *ServerResponse) decodeACKLine(line []byte) error {
}

sp := bytes.Index(line, []byte(" "))
if sp+41 > len(line) {
return fmt.Errorf("malformed ACK %q", line)
}
h := plumbing.NewHash(string(line[sp+1 : sp+41]))
r.ACKs = append(r.ACKs, h)
return nil
1 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq.go (generated, vendored)
@@ -17,6 +17,7 @@ type UploadRequest struct {
Wants []plumbing.Hash
Shallows []plumbing.Hash
Depth Depth
Filter Filter
}

// Depth values stores the desired depth of the requested packfile: see
11 vendor/github.com/go-git/go-git/v5/plumbing/protocol/packp/ulreq_encode.go (generated, vendored)
@@ -132,6 +132,17 @@ func (e *ulReqEncoder) encodeDepth() stateFn {
return nil
}

return e.encodeFilter
}

func (e *ulReqEncoder) encodeFilter() stateFn {
if filter := e.data.Filter; filter != "" {
if err := e.pe.Encodef("filter %s\n", filter); err != nil {
e.err = fmt.Errorf("encoding filter %s: %s", filter, err)
return nil
}
}

return e.encodeFlush
}
4 vendor/github.com/go-git/go-git/v5/plumbing/reference.go (generated, vendored)
@@ -188,7 +188,7 @@ func (r ReferenceName) Validate() error {

isBranch := r.IsBranch()
isTag := r.IsTag()
for _, part := range parts {
for i, part := range parts {
// rule 6
if len(part) == 0 {
return ErrInvalidReferenceName
@@ -205,7 +205,7 @@ func (r ReferenceName) Validate() error {
return ErrInvalidReferenceName
}

if (isBranch || isTag) && strings.HasPrefix(part, "-") { // branches & tags can't start with -
if (isBranch || isTag) && strings.HasPrefix(part, "-") && (i == 2) { // branches & tags can't start with -
return ErrInvalidReferenceName
}
}
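The change above narrows the leading-dash rule so that only the segment directly after refs/heads/ or refs/tags/ (index 2) is rejected. A small check sketch of that behaviour as the diff describes it:

```go
package main

import (
	"fmt"

	"github.com/go-git/go-git/v5/plumbing"
)

func main() {
	// Rejected: the branch name segment itself starts with '-'.
	fmt.Println(plumbing.ReferenceName("refs/heads/-bad").Validate())

	// Per the hunk above, a deeper path segment starting with '-' is no
	// longer rejected by this particular rule.
	fmt.Println(plumbing.ReferenceName("refs/heads/feature/-nested").Validate())
}
```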
7 vendor/github.com/go-git/go-git/v5/plumbing/transport/common.go (generated, vendored)
@ -19,6 +19,7 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
"net/url"
|
||||
"path/filepath"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@ -295,7 +296,11 @@ func parseFile(endpoint string) (*Endpoint, bool) {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
path := endpoint
|
||||
path, err := filepath.Abs(endpoint)
|
||||
if err != nil {
|
||||
return nil, false
|
||||
}
|
||||
|
||||
return &Endpoint{
|
||||
Protocol: "file",
|
||||
Path: path,
|
||||
|
19 vendor/github.com/go-git/go-git/v5/plumbing/transport/file/client.go (generated, vendored)
@@ -7,6 +7,7 @@ import (
"io"
"os"
"path/filepath"
"runtime"
"strings"

"github.com/go-git/go-git/v5/plumbing/transport"
@@ -95,7 +96,23 @@ func (r *runner) Command(cmd string, ep *transport.Endpoint, auth transport.Auth
}
}

return &command{cmd: execabs.Command(cmd, ep.Path)}, nil
return &command{cmd: execabs.Command(cmd, adjustPathForWindows(ep.Path))}, nil
}

func isDriveLetter(c byte) bool {
return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')
}

// On Windows, the path that results from a file: URL has a leading slash. This
// has to be removed if there's a drive letter
func adjustPathForWindows(p string) string {
if runtime.GOOS != "windows" {
return p
}
if len(p) >= 3 && p[0] == '/' && isDriveLetter(p[1]) && p[2] == ':' {
return p[1:]
}
return p
}

type command struct {
6 vendor/github.com/go-git/go-git/v5/plumbing/transport/http/common.go (generated, vendored)
@@ -430,11 +430,11 @@ func NewErr(r *http.Response) error {

switch r.StatusCode {
case http.StatusUnauthorized:
return transport.ErrAuthenticationRequired
return fmt.Errorf("%w: %s", transport.ErrAuthenticationRequired, reason)
case http.StatusForbidden:
return transport.ErrAuthorizationFailed
return fmt.Errorf("%w: %s", transport.ErrAuthorizationFailed, reason)
case http.StatusNotFound:
return transport.ErrRepositoryNotFound
return fmt.Errorf("%w: %s", transport.ErrRepositoryNotFound, reason)
}

return plumbing.NewUnexpectedError(&Err{r, reason})
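Because the HTTP transport now wraps its sentinel errors with %w instead of returning them bare, callers should match them with errors.Is. A hedged sketch (the URL is a placeholder for a repository that requires credentials):

```go
package main

import (
	"errors"
	"fmt"

	git "github.com/go-git/go-git/v5"
	"github.com/go-git/go-git/v5/plumbing/transport"
	"github.com/go-git/go-git/v5/storage/memory"
)

func main() {
	// Placeholder URL for a private repository.
	_, err := git.Clone(memory.NewStorage(), nil, &git.CloneOptions{
		URL: "https://example.com/private/repo.git",
	})

	// Wrapped errors keep the sentinel in the chain, so errors.Is still
	// matches while the message now carries the server's reason text.
	if errors.Is(err, transport.ErrAuthenticationRequired) {
		fmt.Println("authentication required:", err)
	} else if err != nil {
		fmt.Println("clone failed:", err)
	}
}
```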
12 vendor/github.com/go-git/go-git/v5/plumbing/transport/server/loader.go (generated, vendored)
@@ -40,8 +40,16 @@ func (l *fsLoader) Load(ep *transport.Endpoint) (storer.Storer, error) {
return nil, err
}

if _, err := fs.Stat("config"); err != nil {
return nil, transport.ErrRepositoryNotFound
var bare bool
if _, err := fs.Stat("config"); err == nil {
bare = true
}

if !bare {
// do not use git.GitDirName due to import cycle
if _, err := fs.Stat(".git"); err != nil {
return nil, transport.ErrRepositoryNotFound
}
}

return filesystem.NewStorage(fs, cache.NewObjectLRUDefault()), nil
33 vendor/github.com/go-git/go-git/v5/remote.go (generated, vendored)
@ -9,6 +9,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/go-git/go-billy/v5/osfs"
|
||||
|
||||
"github.com/go-git/go-git/v5/config"
|
||||
"github.com/go-git/go-git/v5/internal/url"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
@ -82,7 +83,7 @@ func (r *Remote) String() string {
|
||||
var fetch, push string
|
||||
if len(r.c.URLs) > 0 {
|
||||
fetch = r.c.URLs[0]
|
||||
push = r.c.URLs[0]
|
||||
push = r.c.URLs[len(r.c.URLs)-1]
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%s\t%s (fetch)\n%[1]s\t%[3]s (push)", r.c.Name, fetch, push)
|
||||
@ -109,8 +110,8 @@ func (r *Remote) PushContext(ctx context.Context, o *PushOptions) (err error) {
|
||||
return fmt.Errorf("remote names don't match: %s != %s", o.RemoteName, r.c.Name)
|
||||
}
|
||||
|
||||
if o.RemoteURL == "" {
|
||||
o.RemoteURL = r.c.URLs[0]
|
||||
if o.RemoteURL == "" && len(r.c.URLs) > 0 {
|
||||
o.RemoteURL = r.c.URLs[len(r.c.URLs)-1]
|
||||
}
|
||||
|
||||
s, err := newSendPackSession(o.RemoteURL, o.Auth, o.InsecureSkipTLS, o.CABundle, o.ProxyOptions)
|
||||
@ -491,7 +492,18 @@ func (r *Remote) fetch(ctx context.Context, o *FetchOptions) (sto storer.Referen
|
||||
}
|
||||
|
||||
if !updated && !updatedPrune {
|
||||
return remoteRefs, NoErrAlreadyUpToDate
|
||||
// No references updated, but may have fetched new objects, check if we now have any of our wants
|
||||
for _, hash := range req.Wants {
|
||||
exists, _ := objectExists(r.s, hash)
|
||||
if exists {
|
||||
updated = true
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
if !updated {
|
||||
return remoteRefs, NoErrAlreadyUpToDate
|
||||
}
|
||||
}
|
||||
|
||||
return remoteRefs, nil
|
||||
@ -878,17 +890,12 @@ func getHavesFromRef(
|
||||
return nil
|
||||
}
|
||||
|
||||
// No need to load the commit if we know the remote already
|
||||
// has this hash.
|
||||
if remoteRefs[h] {
|
||||
haves[h] = true
|
||||
return nil
|
||||
}
|
||||
|
||||
commit, err := object.GetCommit(s, h)
|
||||
if err != nil {
|
||||
// Ignore the error if this isn't a commit.
|
||||
haves[ref.Hash()] = true
|
||||
if !errors.Is(err, plumbing.ErrObjectNotFound) {
|
||||
// Ignore the error if this isn't a commit.
|
||||
haves[ref.Hash()] = true
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
|
4 vendor/github.com/go-git/go-git/v5/repository.go (generated, vendored)
@ -956,7 +956,7 @@ func (r *Repository) clone(ctx context.Context, o *CloneOptions) error {
|
||||
}
|
||||
|
||||
if o.RecurseSubmodules != NoRecurseSubmodules {
|
||||
if err := w.updateSubmodules(&SubmoduleUpdateOptions{
|
||||
if err := w.updateSubmodules(ctx, &SubmoduleUpdateOptions{
|
||||
RecurseSubmodules: o.RecurseSubmodules,
|
||||
Depth: func() int {
|
||||
if o.ShallowSubmodules {
|
||||
@ -1037,7 +1037,7 @@ func (r *Repository) setIsBare(isBare bool) error {
|
||||
return r.Storer.SetConfig(cfg)
|
||||
}
|
||||
|
||||
func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, head *plumbing.Reference) error {
|
||||
func (r *Repository) updateRemoteConfigIfNeeded(o *CloneOptions, c *config.RemoteConfig, _ *plumbing.Reference) error {
|
||||
if !o.SingleBranch {
|
||||
return nil
|
||||
}
|
||||
|
69 vendor/github.com/go-git/go-git/v5/status.go (generated, vendored)
@@ -4,6 +4,9 @@ import (
"bytes"
"fmt"
"path/filepath"

mindex "github.com/go-git/go-git/v5/utils/merkletrie/index"
"github.com/go-git/go-git/v5/utils/merkletrie/noder"
)

// Status represents the current status of a Worktree.
@@ -77,3 +80,69 @@ const (
Copied StatusCode = 'C'
UpdatedButUnmerged StatusCode = 'U'
)

// StatusStrategy defines the different types of strategies when processing
// the worktree status.
type StatusStrategy int

const (
// TODO: (V6) Review the default status strategy.
// TODO: (V6) Review the type used to represent Status, to enable lazy
// processing of statuses going direct to the backing filesystem.
defaultStatusStrategy = Empty

// Empty starts its status map from empty. Missing entries for a given
// path means that the file is untracked. This causes a known issue (#119)
// whereby unmodified files can be incorrectly reported as untracked.
//
// This can be used when returning the changed state within a modified Worktree.
// For example, to check whether the current worktree is clean.
Empty StatusStrategy = 0
// Preload goes through all existing nodes from the index and add them to the
// status map as unmodified. This is currently the most reliable strategy
// although it comes at a performance cost in large repositories.
//
// This method is recommended when fetching the status of unmodified files.
// For example, to confirm the status of a specific file that is either
// untracked or unmodified.
Preload StatusStrategy = 1
)

func (s StatusStrategy) new(w *Worktree) (Status, error) {
switch s {
case Preload:
return preloadStatus(w)
case Empty:
return make(Status), nil
}
return nil, fmt.Errorf("%w: %+v", ErrUnsupportedStatusStrategy, s)
}

func preloadStatus(w *Worktree) (Status, error) {
idx, err := w.r.Storer.Index()
if err != nil {
return nil, err
}

idxRoot := mindex.NewRootNode(idx)
nodes := []noder.Noder{idxRoot}

status := make(Status)
for len(nodes) > 0 {
var node noder.Noder
node, nodes = nodes[0], nodes[1:]
if node.IsDir() {
children, err := node.Children()
if err != nil {
return nil, err
}
nodes = append(nodes, children...)
continue
}
fs := status.File(node.Name())
fs.Worktree = Unmodified
fs.Staging = Unmodified
}

return status, nil
}
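Together with the StatusOptions/StatusWithOptions plumbing added later in this commit (worktree_status.go), the Preload strategy documented above can be selected explicitly; a hedged sketch with a placeholder repository path:

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/tmp/example-repo") // placeholder path
	if err != nil {
		panic(err)
	}

	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Preload seeds the status map with every index entry as Unmodified,
	// avoiding the "clean file reported as untracked" issue noted above,
	// at some cost on large repositories.
	status, err := wt.StatusWithOptions(git.StatusOptions{Strategy: git.Preload})
	if err != nil {
		panic(err)
	}
	fmt.Println(status.IsClean())
}
```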
35 vendor/github.com/go-git/go-git/v5/storage/filesystem/dotgit/dotgit.go (generated, vendored)
@ -72,6 +72,9 @@ var (
|
||||
// ErrIsDir is returned when a reference file is attempting to be read,
|
||||
// but the path specified is a directory.
|
||||
ErrIsDir = errors.New("reference path is a directory")
|
||||
// ErrEmptyRefFile is returned when a reference file is attempted to be read,
|
||||
// but the file is empty
|
||||
ErrEmptyRefFile = errors.New("ref file is empty")
|
||||
)
|
||||
|
||||
// Options holds configuration for the storage.
|
||||
@ -249,7 +252,7 @@ func (d *DotGit) objectPacks() ([]plumbing.Hash, error) {
|
||||
continue
|
||||
}
|
||||
|
||||
h := plumbing.NewHash(n[5 : len(n)-5]) //pack-(hash).pack
|
||||
h := plumbing.NewHash(n[5 : len(n)-5]) // pack-(hash).pack
|
||||
if h.IsZero() {
|
||||
// Ignore files with badly-formatted names.
|
||||
continue
|
||||
@ -661,18 +664,33 @@ func (d *DotGit) readReferenceFrom(rd io.Reader, name string) (ref *plumbing.Ref
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(b) == 0 {
|
||||
return nil, ErrEmptyRefFile
|
||||
}
|
||||
|
||||
line := strings.TrimSpace(string(b))
|
||||
return plumbing.NewReferenceFromStrings(name, line), nil
|
||||
}
|
||||
|
||||
// checkReferenceAndTruncate reads the reference from the given file, or the `pack-refs` file if
|
||||
// the file was empty. Then it checks that the old reference matches the stored reference and
|
||||
// truncates the file.
|
||||
func (d *DotGit) checkReferenceAndTruncate(f billy.File, old *plumbing.Reference) error {
|
||||
if old == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
ref, err := d.readReferenceFrom(f, old.Name().String())
|
||||
if errors.Is(err, ErrEmptyRefFile) {
|
||||
// This may happen if the reference is being read from a newly created file.
|
||||
// In that case, try getting the reference from the packed refs file.
|
||||
ref, err = d.packedRef(old.Name())
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if ref.Hash() != old.Hash() {
|
||||
return storage.ErrReferenceHasChanged
|
||||
}
|
||||
@ -701,7 +719,11 @@ func (d *DotGit) SetRef(r, old *plumbing.Reference) error {
|
||||
// Symbolic references are resolved and included in the output.
|
||||
func (d *DotGit) Refs() ([]*plumbing.Reference, error) {
|
||||
var refs []*plumbing.Reference
|
||||
var seen = make(map[plumbing.ReferenceName]bool)
|
||||
seen := make(map[plumbing.ReferenceName]bool)
|
||||
if err := d.addRefFromHEAD(&refs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := d.addRefsFromRefDir(&refs, seen); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -710,10 +732,6 @@ func (d *DotGit) Refs() ([]*plumbing.Reference, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := d.addRefFromHEAD(&refs); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return refs, nil
|
||||
}
|
||||
|
||||
@ -815,7 +833,8 @@ func (d *DotGit) addRefsFromPackedRefsFile(refs *[]*plumbing.Reference, f billy.
|
||||
}
|
||||
|
||||
func (d *DotGit) openAndLockPackedRefs(doCreate bool) (
|
||||
pr billy.File, err error) {
|
||||
pr billy.File, err error,
|
||||
) {
|
||||
var f billy.File
|
||||
defer func() {
|
||||
if err != nil && f != nil {
|
||||
@ -1020,7 +1039,7 @@ func (d *DotGit) readReferenceFile(path, name string) (ref *plumbing.Reference,
|
||||
|
||||
func (d *DotGit) CountLooseRefs() (int, error) {
|
||||
var refs []*plumbing.Reference
|
||||
var seen = make(map[plumbing.ReferenceName]bool)
|
||||
seen := make(map[plumbing.ReferenceName]bool)
|
||||
if err := d.addRefsFromRefDir(&refs, seen); err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
2 vendor/github.com/go-git/go-git/v5/storage/filesystem/index.go (generated, vendored)
@@ -48,7 +48,7 @@ func (s *IndexStorage) Index() (i *index.Index, err error) {

defer ioutil.CheckClose(f, &err)

d := index.NewDecoder(bufio.NewReader(f))
d := index.NewDecoder(f)
err = d.Decode(idx)
return idx, err
}
4 vendor/github.com/go-git/go-git/v5/storage/filesystem/object.go (generated, vendored)
@ -431,13 +431,13 @@ func (s *ObjectStorage) getFromUnpacked(h plumbing.Hash) (obj plumbing.EncodedOb
|
||||
|
||||
defer ioutil.CheckClose(w, &err)
|
||||
|
||||
s.objectCache.Put(obj)
|
||||
|
||||
bufp := copyBufferPool.Get().(*[]byte)
|
||||
buf := *bufp
|
||||
_, err = io.CopyBuffer(w, r, buf)
|
||||
copyBufferPool.Put(bufp)
|
||||
|
||||
s.objectCache.Put(obj)
|
||||
|
||||
return obj, err
|
||||
}
|
||||
|
||||
|
6 vendor/github.com/go-git/go-git/v5/submodule.go (generated, vendored)
@ -214,10 +214,10 @@ func (s *Submodule) update(ctx context.Context, o *SubmoduleUpdateOptions, force
|
||||
return err
|
||||
}
|
||||
|
||||
return s.doRecursiveUpdate(r, o)
|
||||
return s.doRecursiveUpdate(ctx, r, o)
|
||||
}
|
||||
|
||||
func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions) error {
|
||||
func (s *Submodule) doRecursiveUpdate(ctx context.Context, r *Repository, o *SubmoduleUpdateOptions) error {
|
||||
if o.RecurseSubmodules == NoRecurseSubmodules {
|
||||
return nil
|
||||
}
|
||||
@ -236,7 +236,7 @@ func (s *Submodule) doRecursiveUpdate(r *Repository, o *SubmoduleUpdateOptions)
|
||||
*new = *o
|
||||
|
||||
new.RecurseSubmodules--
|
||||
return l.Update(new)
|
||||
return l.UpdateContext(ctx, new)
|
||||
}
|
||||
|
||||
func (s *Submodule) fetchAndCheckout(
|
||||
|
9 vendor/github.com/go-git/go-git/v5/utils/merkletrie/change.go (generated, vendored)
@ -1,12 +1,17 @@
|
||||
package merkletrie
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/go-git/go-git/v5/utils/merkletrie/noder"
|
||||
)
|
||||
|
||||
var (
|
||||
ErrEmptyFileName = errors.New("empty filename in tree entry")
|
||||
)
|
||||
|
||||
// Action values represent the kind of things a Change can represent:
|
||||
// insertion, deletions or modifications of files.
|
||||
type Action int
|
||||
@ -121,6 +126,10 @@ func (l *Changes) AddRecursiveDelete(root noder.Path) error {
|
||||
type noderToChangeFn func(noder.Path) Change // NewInsert or NewDelete
|
||||
|
||||
func (l *Changes) addRecursive(root noder.Path, ctor noderToChangeFn) error {
|
||||
if root.String() == "" {
|
||||
return ErrEmptyFileName
|
||||
}
|
||||
|
||||
if !root.IsDir() {
|
||||
l.Add(ctor(root))
|
||||
return nil
|
||||
|
2 vendor/github.com/go-git/go-git/v5/utils/merkletrie/difftree.go (generated, vendored)
@@ -11,7 +11,7 @@ package merkletrie
// corresponding changes and move the iterators further over both
// trees.
//
// The table bellow show all the possible comparison results, along
// The table below shows all the possible comparison results, along
// with what changes should we produce and how to advance the
// iterators.
//
2 vendor/github.com/go-git/go-git/v5/utils/sync/bufio.go (generated, vendored)
@@ -13,7 +13,7 @@ var bufioReader = sync.Pool{
}

// GetBufioReader returns a *bufio.Reader that is managed by a sync.Pool.
// Returns a bufio.Reader that is resetted with reader and ready for use.
// Returns a bufio.Reader that is reset with reader and ready for use.
//
// After use, the *bufio.Reader should be put back into the sync.Pool
// by calling PutBufioReader.
2 vendor/github.com/go-git/go-git/v5/utils/sync/bytes.go (generated, vendored)
@@ -35,7 +35,7 @@ func PutByteSlice(buf *[]byte) {
}

// GetBytesBuffer returns a *bytes.Buffer that is managed by a sync.Pool.
// Returns a buffer that is resetted and ready for use.
// Returns a buffer that is reset and ready for use.
//
// After use, the *bytes.Buffer should be put back into the sync.Pool
// by calling PutBytesBuffer.
4 vendor/github.com/go-git/go-git/v5/utils/sync/zlib.go (generated, vendored)
@@ -35,7 +35,7 @@ type ZLibReader struct {
}

// GetZlibReader returns a ZLibReader that is managed by a sync.Pool.
// Returns a ZLibReader that is resetted using a dictionary that is
// Returns a ZLibReader that is reset using a dictionary that is
// also managed by a sync.Pool.
//
// After use, the ZLibReader should be put back into the sync.Pool
@@ -58,7 +58,7 @@ func PutZlibReader(z ZLibReader) {
}

// GetZlibWriter returns a *zlib.Writer that is managed by a sync.Pool.
// Returns a writer that is resetted with w and ready for use.
// Returns a writer that is reset with w and ready for use.
//
// After use, the *zlib.Writer should be put back into the sync.Pool
// by calling PutZlibWriter.
117 vendor/github.com/go-git/go-git/v5/worktree.go (generated, vendored)
@ -25,11 +25,12 @@ import (
|
||||
)
|
||||
|
||||
var (
|
||||
ErrWorktreeNotClean = errors.New("worktree is not clean")
|
||||
ErrSubmoduleNotFound = errors.New("submodule not found")
|
||||
ErrUnstagedChanges = errors.New("worktree contains unstaged changes")
|
||||
ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink")
|
||||
ErrNonFastForwardUpdate = errors.New("non-fast-forward update")
|
||||
ErrWorktreeNotClean = errors.New("worktree is not clean")
|
||||
ErrSubmoduleNotFound = errors.New("submodule not found")
|
||||
ErrUnstagedChanges = errors.New("worktree contains unstaged changes")
|
||||
ErrGitModulesSymlink = errors.New(gitmodulesFile + " is a symlink")
|
||||
ErrNonFastForwardUpdate = errors.New("non-fast-forward update")
|
||||
ErrRestoreWorktreeOnlyNotSupported = errors.New("worktree only is not supported")
|
||||
)
|
||||
|
||||
// Worktree represents a git worktree.
|
||||
@ -139,7 +140,7 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
|
||||
}
|
||||
|
||||
if o.RecurseSubmodules != NoRecurseSubmodules {
|
||||
return w.updateSubmodules(&SubmoduleUpdateOptions{
|
||||
return w.updateSubmodules(ctx, &SubmoduleUpdateOptions{
|
||||
RecurseSubmodules: o.RecurseSubmodules,
|
||||
Auth: o.Auth,
|
||||
})
|
||||
@ -148,13 +149,13 @@ func (w *Worktree) PullContext(ctx context.Context, o *PullOptions) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Worktree) updateSubmodules(o *SubmoduleUpdateOptions) error {
|
||||
func (w *Worktree) updateSubmodules(ctx context.Context, o *SubmoduleUpdateOptions) error {
|
||||
s, err := w.Submodules()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
o.Init = true
|
||||
return s.Update(o)
|
||||
return s.UpdateContext(ctx, o)
|
||||
}
|
||||
|
||||
// Checkout switch branches or restore working tree files.
|
||||
@ -307,13 +308,13 @@ func (w *Worktree) ResetSparsely(opts *ResetOptions, dirs []string) error {
|
||||
}
|
||||
|
||||
if opts.Mode == MixedReset || opts.Mode == MergeReset || opts.Mode == HardReset {
|
||||
if err := w.resetIndex(t, dirs); err != nil {
|
||||
if err := w.resetIndex(t, dirs, opts.Files); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if opts.Mode == MergeReset || opts.Mode == HardReset {
|
||||
if err := w.resetWorktree(t); err != nil {
|
||||
if err := w.resetWorktree(t, opts.Files); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
@ -321,20 +322,52 @@ func (w *Worktree) ResetSparsely(opts *ResetOptions, dirs []string) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// Restore restores specified files in the working tree or stage with contents from
|
||||
// a restore source. If a path is tracked but does not exist in the restore,
|
||||
// source, it will be removed to match the source.
|
||||
//
|
||||
// If Staged and Worktree are true, then the restore source will be the index.
|
||||
// If only Staged is true, then the restore source will be HEAD.
|
||||
// If only Worktree is true or neither Staged nor Worktree are true, will
|
||||
// result in ErrRestoreWorktreeOnlyNotSupported because restoring the working
|
||||
// tree while leaving the stage untouched is not currently supported.
|
||||
//
|
||||
// Restore with no files specified will return ErrNoRestorePaths.
|
||||
func (w *Worktree) Restore(o *RestoreOptions) error {
|
||||
if err := o.Validate(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if o.Staged {
|
||||
opts := &ResetOptions{
|
||||
Files: o.Files,
|
||||
}
|
||||
|
||||
if o.Worktree {
|
||||
// If we are doing both Worktree and Staging then it is a hard reset
|
||||
opts.Mode = HardReset
|
||||
} else {
|
||||
// If we are doing just staging then it is a mixed reset
|
||||
opts.Mode = MixedReset
|
||||
}
|
||||
|
||||
return w.Reset(opts)
|
||||
}
|
||||
|
||||
return ErrRestoreWorktreeOnlyNotSupported
|
||||
}
|
||||
|
||||
// Reset the worktree to a specified state.
|
||||
func (w *Worktree) Reset(opts *ResetOptions) error {
|
||||
return w.ResetSparsely(opts, nil)
|
||||
}
|
||||
|
||||
func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error {
|
||||
func (w *Worktree) resetIndex(t *object.Tree, dirs []string, files []string) error {
|
||||
idx, err := w.r.Storer.Index()
|
||||
if len(dirs) > 0 {
|
||||
idx.SkipUnless(dirs)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
b := newIndexBuilder(idx)
|
||||
|
||||
changes, err := w.diffTreeWithStaging(t, true)
|
||||
@ -362,6 +395,13 @@ func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error {
|
||||
name = ch.From.String()
|
||||
}
|
||||
|
||||
if len(files) > 0 {
|
||||
contains := inFiles(files, name)
|
||||
if !contains {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
b.Remove(name)
|
||||
if e == nil {
|
||||
continue
|
||||
@ -376,10 +416,25 @@ func (w *Worktree) resetIndex(t *object.Tree, dirs []string) error {
|
||||
}
|
||||
|
||||
b.Write(idx)
|
||||
|
||||
if len(dirs) > 0 {
|
||||
idx.SkipUnless(dirs)
|
||||
}
|
||||
|
||||
return w.r.Storer.SetIndex(idx)
|
||||
}
|
||||
|
||||
func (w *Worktree) resetWorktree(t *object.Tree) error {
|
||||
func inFiles(files []string, v string) bool {
|
||||
for _, s := range files {
|
||||
if s == v {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (w *Worktree) resetWorktree(t *object.Tree, files []string) error {
|
||||
changes, err := w.diffStagingWithWorktree(true, false)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -395,6 +450,25 @@ func (w *Worktree) resetWorktree(t *object.Tree) error {
|
||||
if err := w.validChange(ch); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(files) > 0 {
|
||||
file := ""
|
||||
if ch.From != nil {
|
||||
file = ch.From.String()
|
||||
} else if ch.To != nil {
|
||||
file = ch.To.String()
|
||||
}
|
||||
|
||||
if file == "" {
|
||||
continue
|
||||
}
|
||||
|
||||
contains := inFiles(files, file)
|
||||
if !contains {
|
||||
continue
|
||||
}
|
||||
}
|
||||
|
||||
if err := w.checkoutChange(ch, t, b); err != nil {
|
||||
return err
|
||||
}
|
||||
@ -642,7 +716,7 @@ func (w *Worktree) checkoutChangeRegularFile(name string,
|
||||
return err
|
||||
}
|
||||
|
||||
return w.addIndexFromFile(name, e.Hash, idx)
|
||||
return w.addIndexFromFile(name, e.Hash, f.Mode, idx)
|
||||
}
|
||||
|
||||
return nil
|
||||
@ -725,18 +799,13 @@ func (w *Worktree) addIndexFromTreeEntry(name string, f *object.TreeEntry, idx *
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, idx *indexBuilder) error {
|
||||
func (w *Worktree) addIndexFromFile(name string, h plumbing.Hash, mode filemode.FileMode, idx *indexBuilder) error {
|
||||
idx.Remove(name)
|
||||
fi, err := w.Filesystem.Lstat(name)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
mode, err := filemode.NewFromOSFileMode(fi.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
e := &index.Entry{
|
||||
Hash: h,
|
||||
Name: name,
|
||||
@ -1058,7 +1127,7 @@ func rmFileAndDirsIfEmpty(fs billy.Filesystem, name string) error {
|
||||
dir := filepath.Dir(name)
|
||||
for {
|
||||
removed, err := removeDirIfEmpty(fs, dir)
|
||||
if err != nil {
|
||||
if err != nil && !os.IsNotExist(err) {
|
||||
return err
|
||||
}
|
||||
|
||||
|
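The Restore documentation in the hunks above maps Staged-only to a mixed reset and Staged+Worktree to a hard reset over the listed paths, and rejects worktree-only restores. A hedged usage sketch (repository path and file name are placeholders):

```go
package main

import (
	"fmt"

	git "github.com/go-git/go-git/v5"
)

func main() {
	repo, err := git.PlainOpen("/tmp/example-repo") // placeholder path
	if err != nil {
		panic(err)
	}
	wt, err := repo.Worktree()
	if err != nil {
		panic(err)
	}

	// Staged + Worktree: discard staged and working-tree changes for the
	// listed files (a hard reset limited to those paths).
	err = wt.Restore(&git.RestoreOptions{
		Staged:   true,
		Worktree: true,
		Files:    []string{"main.go"},
	})
	fmt.Println(err)

	// Worktree-only restores are rejected with ErrRestoreWorktreeOnlyNotSupported.
	fmt.Println(wt.Restore(&git.RestoreOptions{Worktree: true, Files: []string{"main.go"}}))
}
```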
43 vendor/github.com/go-git/go-git/v5/worktree_commit.go (generated, vendored)
@ -5,6 +5,7 @@ import (
|
||||
"errors"
|
||||
"io"
|
||||
"path"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
@ -23,6 +24,10 @@ var (
|
||||
// ErrEmptyCommit occurs when a commit is attempted using a clean
|
||||
// working tree, with no changes to be committed.
|
||||
ErrEmptyCommit = errors.New("cannot create empty commit: clean working tree")
|
||||
|
||||
// characters to be removed from user name and/or email before using them to build a commit object
|
||||
// See https://git-scm.com/docs/git-commit#_commit_information
|
||||
invalidCharactersRe = regexp.MustCompile(`[<>\n]`)
|
||||
)
|
||||
|
||||
// Commit stores the current contents of the index in a new commit along with
|
||||
@ -38,8 +43,6 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error
|
||||
}
|
||||
}
|
||||
|
||||
var treeHash plumbing.Hash
|
||||
|
||||
if opts.Amend {
|
||||
head, err := w.r.Head()
|
||||
if err != nil {
|
||||
@ -61,16 +64,34 @@ func (w *Worktree) Commit(msg string, opts *CommitOptions) (plumbing.Hash, error
|
||||
return plumbing.ZeroHash, err
|
||||
}
|
||||
|
||||
// First handle the case of the first commit in the repository being empty.
|
||||
if len(opts.Parents) == 0 && len(idx.Entries) == 0 && !opts.AllowEmptyCommits {
|
||||
return plumbing.ZeroHash, ErrEmptyCommit
|
||||
}
|
||||
|
||||
h := &buildTreeHelper{
|
||||
fs: w.Filesystem,
|
||||
s: w.r.Storer,
|
||||
}
|
||||
|
||||
treeHash, err = h.BuildTree(idx, opts)
|
||||
treeHash, err := h.BuildTree(idx, opts)
|
||||
if err != nil {
|
||||
return plumbing.ZeroHash, err
|
||||
}
|
||||
|
||||
previousTree := plumbing.ZeroHash
|
||||
if len(opts.Parents) > 0 {
|
||||
parentCommit, err := w.r.CommitObject(opts.Parents[0])
|
||||
if err != nil {
|
||||
return plumbing.ZeroHash, err
|
||||
}
|
||||
previousTree = parentCommit.TreeHash
|
||||
}
|
||||
|
||||
if treeHash == previousTree && !opts.AllowEmptyCommits {
|
||||
return plumbing.ZeroHash, ErrEmptyCommit
|
||||
}
|
||||
|
||||
commit, err := w.buildCommitObject(msg, opts, treeHash)
|
||||
if err != nil {
|
||||
return plumbing.ZeroHash, err
|
||||
@ -121,8 +142,8 @@ func (w *Worktree) updateHEAD(commit plumbing.Hash) error {
|
||||
|
||||
func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumbing.Hash) (plumbing.Hash, error) {
|
||||
commit := &object.Commit{
|
||||
Author: *opts.Author,
|
||||
Committer: *opts.Committer,
|
||||
Author: w.sanitize(*opts.Author),
|
||||
Committer: w.sanitize(*opts.Committer),
|
||||
Message: msg,
|
||||
TreeHash: tree,
|
||||
ParentHashes: opts.Parents,
|
||||
@ -148,6 +169,14 @@ func (w *Worktree) buildCommitObject(msg string, opts *CommitOptions, tree plumb
|
||||
return w.r.Storer.SetEncodedObject(obj)
|
||||
}
|
||||
|
||||
func (w *Worktree) sanitize(signature object.Signature) object.Signature {
|
||||
return object.Signature{
|
||||
Name: invalidCharactersRe.ReplaceAllString(signature.Name, ""),
|
||||
Email: invalidCharactersRe.ReplaceAllString(signature.Email, ""),
|
||||
When: signature.When,
|
||||
}
|
||||
}
|
||||
|
||||
type gpgSigner struct {
|
||||
key *openpgp.Entity
|
||||
cfg *packet.Config
|
||||
@ -175,10 +204,6 @@ type buildTreeHelper struct {
|
||||
// BuildTree builds the tree objects and push its to the storer, the hash
|
||||
// of the root tree is returned.
|
||||
func (h *buildTreeHelper) BuildTree(idx *index.Index, opts *CommitOptions) (plumbing.Hash, error) {
|
||||
if len(idx.Entries) == 0 && (opts == nil || !opts.AllowEmptyCommits) {
|
||||
return plumbing.ZeroHash, ErrEmptyCommit
|
||||
}
|
||||
|
||||
const rootNode = ""
|
||||
h.trees = map[string]*object.Tree{rootNode: {}}
|
||||
h.entries = map[string]*object.TreeEntry{}
|
||||
|
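The commit path above now strips the characters git forbids in author/committer identities ('<', '>' and newlines) before building the commit object. A tiny standalone sketch of that sanitisation (the regexp is copied from the hunk; the rest is illustrative):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as invalidCharactersRe in the hunk above.
var invalidCharactersRe = regexp.MustCompile(`[<>\n]`)

func main() {
	// Angle brackets and newlines are removed, so a crafted name cannot
	// break out of the "Name <email>" framing of a commit header.
	fmt.Printf("%q\n", invalidCharactersRe.ReplaceAllString("Jane <Doe>", "")) // "Jane Doe"
}
```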
3 vendor/github.com/go-git/go-git/v5/worktree_linux.go (generated, vendored)
@@ -1,3 +1,4 @@
//go:build linux
// +build linux

package git
@@ -21,6 +22,6 @@ func init() {
}
}

func isSymlinkWindowsNonAdmin(err error) bool {
func isSymlinkWindowsNonAdmin(_ error) bool {
return false
}
34 vendor/github.com/go-git/go-git/v5/worktree_status.go (generated, vendored)
@ -29,10 +29,23 @@ var (
|
||||
// ErrGlobNoMatches in an AddGlob if the glob pattern does not match any
|
||||
// files in the worktree.
|
||||
ErrGlobNoMatches = errors.New("glob pattern did not match any files")
|
||||
// ErrUnsupportedStatusStrategy occurs when an invalid StatusStrategy is used
|
||||
// when processing the Worktree status.
|
||||
ErrUnsupportedStatusStrategy = errors.New("unsupported status strategy")
|
||||
)
|
||||
|
||||
// Status returns the working tree status.
|
||||
func (w *Worktree) Status() (Status, error) {
|
||||
return w.StatusWithOptions(StatusOptions{Strategy: defaultStatusStrategy})
|
||||
}
|
||||
|
||||
// StatusOptions defines the options for Worktree.StatusWithOptions().
|
||||
type StatusOptions struct {
|
||||
Strategy StatusStrategy
|
||||
}
|
||||
|
||||
// StatusWithOptions returns the working tree status.
|
||||
func (w *Worktree) StatusWithOptions(o StatusOptions) (Status, error) {
|
||||
var hash plumbing.Hash
|
||||
|
||||
ref, err := w.r.Head()
|
||||
@ -44,11 +57,14 @@ func (w *Worktree) Status() (Status, error) {
|
||||
hash = ref.Hash()
|
||||
}
|
||||
|
||||
return w.status(hash)
|
||||
return w.status(o.Strategy, hash)
|
||||
}
|
||||
|
||||
func (w *Worktree) status(commit plumbing.Hash) (Status, error) {
|
||||
s := make(Status)
|
||||
func (w *Worktree) status(ss StatusStrategy, commit plumbing.Hash) (Status, error) {
|
||||
s, err := ss.new(w)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
left, err := w.diffCommitWithStaging(commit, false)
|
||||
if err != nil {
|
||||
@ -488,7 +504,7 @@ func (w *Worktree) copyFileToStorage(path string) (hash plumbing.Hash, err error
|
||||
return w.r.Storer.SetEncodedObject(obj)
|
||||
}
|
||||
|
||||
func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.FileInfo) (err error) {
|
||||
func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, _ os.FileInfo) (err error) {
|
||||
src, err := w.Filesystem.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -503,7 +519,7 @@ func (w *Worktree) fillEncodedObjectFromFile(dst io.Writer, path string, fi os.F
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, fi os.FileInfo) error {
|
||||
func (w *Worktree) fillEncodedObjectFromSymlink(dst io.Writer, path string, _ os.FileInfo) error {
|
||||
target, err := w.Filesystem.Readlink(path)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -543,9 +559,11 @@ func (w *Worktree) doUpdateFileToIndex(e *index.Entry, filename string, h plumbi
|
||||
return err
|
||||
}
|
||||
|
||||
if e.Mode.IsRegular() {
|
||||
e.Size = uint32(info.Size())
|
||||
}
|
||||
// The entry size must always reflect the current state, otherwise
|
||||
// it will cause go-git's Worktree.Status() to divert from "git status".
|
||||
// The size of a symlink is the length of the path to the target.
|
||||
// The size of Regular and Executable files is the size of the files.
|
||||
e.Size = uint32(info.Size())
|
||||
|
||||
fillSystemInfo(e, info.Sys())
|
||||
return nil
|
||||
|