build: go 1.24

We were running behind and there were quite a few deprecations to update.
Most of this is in the upstream copy/pasted (vendored) packages, so the
change to our own code seems quite minimal.
2025-03-16 12:04:32 +01:00
parent a2b678caf6
commit 1723025fbf
822 changed files with 25433 additions and 197407 deletions

vendor/golang.org/x/crypto/cryptobyte/asn1.go generated vendored Normal file

@ -0,0 +1,825 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cryptobyte
import (
encoding_asn1 "encoding/asn1"
"fmt"
"math/big"
"reflect"
"time"
"golang.org/x/crypto/cryptobyte/asn1"
)
// This file contains ASN.1-related methods for String and Builder.
// Builder
// AddASN1Int64 appends a DER-encoded ASN.1 INTEGER.
func (b *Builder) AddASN1Int64(v int64) {
b.addASN1Signed(asn1.INTEGER, v)
}
// AddASN1Int64WithTag appends a DER-encoded ASN.1 INTEGER with the
// given tag.
func (b *Builder) AddASN1Int64WithTag(v int64, tag asn1.Tag) {
b.addASN1Signed(tag, v)
}
// AddASN1Enum appends a DER-encoded ASN.1 ENUMERATION.
func (b *Builder) AddASN1Enum(v int64) {
b.addASN1Signed(asn1.ENUM, v)
}
func (b *Builder) addASN1Signed(tag asn1.Tag, v int64) {
b.AddASN1(tag, func(c *Builder) {
length := 1
for i := v; i >= 0x80 || i < -0x80; i >>= 8 {
length++
}
for ; length > 0; length-- {
i := v >> uint((length-1)*8) & 0xff
c.AddUint8(uint8(i))
}
})
}
// AddASN1Uint64 appends a DER-encoded ASN.1 INTEGER.
func (b *Builder) AddASN1Uint64(v uint64) {
b.AddASN1(asn1.INTEGER, func(c *Builder) {
length := 1
for i := v; i >= 0x80; i >>= 8 {
length++
}
for ; length > 0; length-- {
i := v >> uint((length-1)*8) & 0xff
c.AddUint8(uint8(i))
}
})
}
// AddASN1BigInt appends a DER-encoded ASN.1 INTEGER.
func (b *Builder) AddASN1BigInt(n *big.Int) {
if b.err != nil {
return
}
b.AddASN1(asn1.INTEGER, func(c *Builder) {
if n.Sign() < 0 {
// A negative number has to be converted to two's-complement form. So we
// invert and subtract 1. If the most-significant-bit isn't set then
// we'll need to pad the beginning with 0xff in order to keep the number
// negative.
nMinus1 := new(big.Int).Neg(n)
nMinus1.Sub(nMinus1, bigOne)
bytes := nMinus1.Bytes()
for i := range bytes {
bytes[i] ^= 0xff
}
if len(bytes) == 0 || bytes[0]&0x80 == 0 {
c.add(0xff)
}
c.add(bytes...)
} else if n.Sign() == 0 {
c.add(0)
} else {
bytes := n.Bytes()
if bytes[0]&0x80 != 0 {
c.add(0)
}
c.add(bytes...)
}
})
}
// AddASN1OctetString appends a DER-encoded ASN.1 OCTET STRING.
func (b *Builder) AddASN1OctetString(bytes []byte) {
b.AddASN1(asn1.OCTET_STRING, func(c *Builder) {
c.AddBytes(bytes)
})
}
const generalizedTimeFormatStr = "20060102150405Z0700"
// AddASN1GeneralizedTime appends a DER-encoded ASN.1 GENERALIZEDTIME.
func (b *Builder) AddASN1GeneralizedTime(t time.Time) {
if t.Year() < 0 || t.Year() > 9999 {
b.err = fmt.Errorf("cryptobyte: cannot represent %v as a GeneralizedTime", t)
return
}
b.AddASN1(asn1.GeneralizedTime, func(c *Builder) {
c.AddBytes([]byte(t.Format(generalizedTimeFormatStr)))
})
}
// AddASN1UTCTime appends a DER-encoded ASN.1 UTCTime.
func (b *Builder) AddASN1UTCTime(t time.Time) {
b.AddASN1(asn1.UTCTime, func(c *Builder) {
// As utilized by the X.509 profile, UTCTime can only
// represent the years 1950 through 2049.
if t.Year() < 1950 || t.Year() >= 2050 {
b.err = fmt.Errorf("cryptobyte: cannot represent %v as a UTCTime", t)
return
}
c.AddBytes([]byte(t.Format(defaultUTCTimeFormatStr)))
})
}
// AddASN1BitString appends a DER-encoded ASN.1 BIT STRING. This does not
// support BIT STRINGs that are not a whole number of bytes.
func (b *Builder) AddASN1BitString(data []byte) {
b.AddASN1(asn1.BIT_STRING, func(b *Builder) {
b.AddUint8(0)
b.AddBytes(data)
})
}
func (b *Builder) addBase128Int(n int64) {
var length int
if n == 0 {
length = 1
} else {
for i := n; i > 0; i >>= 7 {
length++
}
}
for i := length - 1; i >= 0; i-- {
o := byte(n >> uint(i*7))
o &= 0x7f
if i != 0 {
o |= 0x80
}
b.add(o)
}
}
func isValidOID(oid encoding_asn1.ObjectIdentifier) bool {
if len(oid) < 2 {
return false
}
if oid[0] > 2 || (oid[0] <= 1 && oid[1] >= 40) {
return false
}
for _, v := range oid {
if v < 0 {
return false
}
}
return true
}
func (b *Builder) AddASN1ObjectIdentifier(oid encoding_asn1.ObjectIdentifier) {
b.AddASN1(asn1.OBJECT_IDENTIFIER, func(b *Builder) {
if !isValidOID(oid) {
b.err = fmt.Errorf("cryptobyte: invalid OID: %v", oid)
return
}
b.addBase128Int(int64(oid[0])*40 + int64(oid[1]))
for _, v := range oid[2:] {
b.addBase128Int(int64(v))
}
})
}
func (b *Builder) AddASN1Boolean(v bool) {
b.AddASN1(asn1.BOOLEAN, func(b *Builder) {
if v {
b.AddUint8(0xff)
} else {
b.AddUint8(0)
}
})
}
func (b *Builder) AddASN1NULL() {
b.add(uint8(asn1.NULL), 0)
}
// MarshalASN1 calls encoding_asn1.Marshal on its input and appends the result if
// successful or records an error if one occurred.
func (b *Builder) MarshalASN1(v interface{}) {
// NOTE(martinkr): This is somewhat of a hack to allow propagation of
// encoding_asn1.Marshal errors into Builder.err. N.B. if you call MarshalASN1 with a
// value embedded into a struct, its tag information is lost.
if b.err != nil {
return
}
bytes, err := encoding_asn1.Marshal(v)
if err != nil {
b.err = err
return
}
b.AddBytes(bytes)
}
// AddASN1 appends an ASN.1 object. The object is prefixed with the given tag.
// Tags greater than 30 are not supported and result in an error (i.e.
// low-tag-number form only). The child builder passed to the
// BuilderContinuation can be used to build the content of the ASN.1 object.
func (b *Builder) AddASN1(tag asn1.Tag, f BuilderContinuation) {
if b.err != nil {
return
}
// Identifiers with the low five bits set indicate high-tag-number format
// (two or more octets), which we don't support.
if tag&0x1f == 0x1f {
b.err = fmt.Errorf("cryptobyte: high-tag number identifier octects not supported: 0x%x", tag)
return
}
b.AddUint8(uint8(tag))
b.addLengthPrefixed(1, true, f)
}
// String
// ReadASN1Boolean decodes an ASN.1 BOOLEAN into out and advances.
// It reports whether the read was successful.
func (s *String) ReadASN1Boolean(out *bool) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.BOOLEAN) || len(bytes) != 1 {
return false
}
switch bytes[0] {
case 0:
*out = false
case 0xff:
*out = true
default:
return false
}
return true
}
// ReadASN1Integer decodes an ASN.1 INTEGER into out and advances. If out does
// not point to an integer, to a big.Int, or to a []byte it panics. Only
// positive and zero values can be decoded into []byte, and they are returned as
// big-endian binary values that share memory with s. Positive values will have
// no leading zeroes, and zero will be returned as a single zero byte.
// ReadASN1Integer reports whether the read was successful.
func (s *String) ReadASN1Integer(out interface{}) bool {
switch out := out.(type) {
case *int, *int8, *int16, *int32, *int64:
var i int64
if !s.readASN1Int64(&i) || reflect.ValueOf(out).Elem().OverflowInt(i) {
return false
}
reflect.ValueOf(out).Elem().SetInt(i)
return true
case *uint, *uint8, *uint16, *uint32, *uint64:
var u uint64
if !s.readASN1Uint64(&u) || reflect.ValueOf(out).Elem().OverflowUint(u) {
return false
}
reflect.ValueOf(out).Elem().SetUint(u)
return true
case *big.Int:
return s.readASN1BigInt(out)
case *[]byte:
return s.readASN1Bytes(out)
default:
panic("out does not point to an integer type")
}
}
func checkASN1Integer(bytes []byte) bool {
if len(bytes) == 0 {
// An INTEGER is encoded with at least one octet.
return false
}
if len(bytes) == 1 {
return true
}
if bytes[0] == 0 && bytes[1]&0x80 == 0 || bytes[0] == 0xff && bytes[1]&0x80 == 0x80 {
// Value is not minimally encoded.
return false
}
return true
}
var bigOne = big.NewInt(1)
func (s *String) readASN1BigInt(out *big.Int) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
return false
}
if bytes[0]&0x80 == 0x80 {
// Negative number.
neg := make([]byte, len(bytes))
for i, b := range bytes {
neg[i] = ^b
}
out.SetBytes(neg)
out.Add(out, bigOne)
out.Neg(out)
} else {
out.SetBytes(bytes)
}
return true
}
func (s *String) readASN1Bytes(out *[]byte) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) {
return false
}
if bytes[0]&0x80 == 0x80 {
return false
}
for len(bytes) > 1 && bytes[0] == 0 {
bytes = bytes[1:]
}
*out = bytes
return true
}
func (s *String) readASN1Int64(out *int64) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Signed(out, bytes) {
return false
}
return true
}
func asn1Signed(out *int64, n []byte) bool {
length := len(n)
if length > 8 {
return false
}
for i := 0; i < length; i++ {
*out <<= 8
*out |= int64(n[i])
}
// Shift up and down in order to sign extend the result.
*out <<= 64 - uint8(length)*8
*out >>= 64 - uint8(length)*8
return true
}
func (s *String) readASN1Uint64(out *uint64) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.INTEGER) || !checkASN1Integer(bytes) || !asn1Unsigned(out, bytes) {
return false
}
return true
}
func asn1Unsigned(out *uint64, n []byte) bool {
length := len(n)
if length > 9 || length == 9 && n[0] != 0 {
// Too large for uint64.
return false
}
if n[0]&0x80 != 0 {
// Negative number.
return false
}
for i := 0; i < length; i++ {
*out <<= 8
*out |= uint64(n[i])
}
return true
}
// ReadASN1Int64WithTag decodes an ASN.1 INTEGER with the given tag into out
// and advances. It reports whether the read was successful and resulted in a
// value that can be represented in an int64.
func (s *String) ReadASN1Int64WithTag(out *int64, tag asn1.Tag) bool {
var bytes String
return s.ReadASN1(&bytes, tag) && checkASN1Integer(bytes) && asn1Signed(out, bytes)
}
// ReadASN1Enum decodes an ASN.1 ENUMERATION into out and advances. It reports
// whether the read was successful.
func (s *String) ReadASN1Enum(out *int) bool {
var bytes String
var i int64
if !s.ReadASN1(&bytes, asn1.ENUM) || !checkASN1Integer(bytes) || !asn1Signed(&i, bytes) {
return false
}
if int64(int(i)) != i {
return false
}
*out = int(i)
return true
}
func (s *String) readBase128Int(out *int) bool {
ret := 0
for i := 0; len(*s) > 0; i++ {
if i == 5 {
return false
}
// Avoid overflowing int on a 32-bit platform.
// We don't want different behavior based on the architecture.
if ret >= 1<<(31-7) {
return false
}
ret <<= 7
b := s.read(1)[0]
// ITU-T X.690, section 8.19.2:
// The subidentifier shall be encoded in the fewest possible octets,
// that is, the leading octet of the subidentifier shall not have the value 0x80.
if i == 0 && b == 0x80 {
return false
}
ret |= int(b & 0x7f)
if b&0x80 == 0 {
*out = ret
return true
}
}
return false // truncated
}
// ReadASN1ObjectIdentifier decodes an ASN.1 OBJECT IDENTIFIER into out and
// advances. It reports whether the read was successful.
func (s *String) ReadASN1ObjectIdentifier(out *encoding_asn1.ObjectIdentifier) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.OBJECT_IDENTIFIER) || len(bytes) == 0 {
return false
}
// In the worst case, we get two elements from the first byte (which is
// encoded differently) and then every varint is a single byte long.
components := make([]int, len(bytes)+1)
// The first varint is 40*value1 + value2:
// According to this packing, value1 can take the values 0, 1 and 2 only.
// When value1 = 0 or value1 = 1, then value2 is <= 39. When value1 = 2,
// then there are no restrictions on value2.
var v int
if !bytes.readBase128Int(&v) {
return false
}
if v < 80 {
components[0] = v / 40
components[1] = v % 40
} else {
components[0] = 2
components[1] = v - 80
}
i := 2
for ; len(bytes) > 0; i++ {
if !bytes.readBase128Int(&v) {
return false
}
components[i] = v
}
*out = components[:i]
return true
}
// ReadASN1GeneralizedTime decodes an ASN.1 GENERALIZEDTIME into out and
// advances. It reports whether the read was successful.
func (s *String) ReadASN1GeneralizedTime(out *time.Time) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.GeneralizedTime) {
return false
}
t := string(bytes)
res, err := time.Parse(generalizedTimeFormatStr, t)
if err != nil {
return false
}
if serialized := res.Format(generalizedTimeFormatStr); serialized != t {
return false
}
*out = res
return true
}
const defaultUTCTimeFormatStr = "060102150405Z0700"
// ReadASN1UTCTime decodes an ASN.1 UTCTime into out and advances.
// It reports whether the read was successful.
func (s *String) ReadASN1UTCTime(out *time.Time) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.UTCTime) {
return false
}
t := string(bytes)
formatStr := defaultUTCTimeFormatStr
var err error
res, err := time.Parse(formatStr, t)
if err != nil {
// Fallback to minute precision if we can't parse second
// precision. If we are following X.509 or X.690 we shouldn't
// support this, but we do.
formatStr = "0601021504Z0700"
res, err = time.Parse(formatStr, t)
}
if err != nil {
return false
}
if serialized := res.Format(formatStr); serialized != t {
return false
}
if res.Year() >= 2050 {
// UTCTime interprets the low order digits 50-99 as 1950-99.
// This only applies to its use in the X.509 profile.
// See https://tools.ietf.org/html/rfc5280#section-4.1.2.5.1
res = res.AddDate(-100, 0, 0)
}
*out = res
return true
}
// ReadASN1BitString decodes an ASN.1 BIT STRING into out and advances.
// It reports whether the read was successful.
func (s *String) ReadASN1BitString(out *encoding_asn1.BitString) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 ||
len(bytes)*8/8 != len(bytes) {
return false
}
paddingBits := bytes[0]
bytes = bytes[1:]
if paddingBits > 7 ||
len(bytes) == 0 && paddingBits != 0 ||
len(bytes) > 0 && bytes[len(bytes)-1]&(1<<paddingBits-1) != 0 {
return false
}
out.BitLength = len(bytes)*8 - int(paddingBits)
out.Bytes = bytes
return true
}
// ReadASN1BitStringAsBytes decodes an ASN.1 BIT STRING into out and advances. It is
// an error if the BIT STRING is not a whole number of bytes. It reports
// whether the read was successful.
func (s *String) ReadASN1BitStringAsBytes(out *[]byte) bool {
var bytes String
if !s.ReadASN1(&bytes, asn1.BIT_STRING) || len(bytes) == 0 {
return false
}
paddingBits := bytes[0]
if paddingBits != 0 {
return false
}
*out = bytes[1:]
return true
}
// ReadASN1Bytes reads the contents of a DER-encoded ASN.1 element (not including
// tag and length bytes) into out, and advances. The element must match the
// given tag. It reports whether the read was successful.
func (s *String) ReadASN1Bytes(out *[]byte, tag asn1.Tag) bool {
return s.ReadASN1((*String)(out), tag)
}
// ReadASN1 reads the contents of a DER-encoded ASN.1 element (not including
// tag and length bytes) into out, and advances. The element must match the
// given tag. It reports whether the read was successful.
//
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
func (s *String) ReadASN1(out *String, tag asn1.Tag) bool {
var t asn1.Tag
if !s.ReadAnyASN1(out, &t) || t != tag {
return false
}
return true
}
// ReadASN1Element reads the contents of a DER-encoded ASN.1 element (including
// tag and length bytes) into out, and advances. The element must match the
// given tag. It reports whether the read was successful.
//
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
func (s *String) ReadASN1Element(out *String, tag asn1.Tag) bool {
var t asn1.Tag
if !s.ReadAnyASN1Element(out, &t) || t != tag {
return false
}
return true
}
// ReadAnyASN1 reads the contents of a DER-encoded ASN.1 element (not including
// tag and length bytes) into out, sets outTag to its tag, and advances.
// It reports whether the read was successful.
//
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
func (s *String) ReadAnyASN1(out *String, outTag *asn1.Tag) bool {
return s.readASN1(out, outTag, true /* skip header */)
}
// ReadAnyASN1Element reads the contents of a DER-encoded ASN.1 element
// (including tag and length bytes) into out, sets outTag to its tag, and
// advances. It reports whether the read was successful.
//
// Tags greater than 30 are not supported (i.e. low-tag-number format only).
func (s *String) ReadAnyASN1Element(out *String, outTag *asn1.Tag) bool {
return s.readASN1(out, outTag, false /* include header */)
}
// PeekASN1Tag reports whether the next ASN.1 value on the string starts with
// the given tag.
func (s String) PeekASN1Tag(tag asn1.Tag) bool {
if len(s) == 0 {
return false
}
return asn1.Tag(s[0]) == tag
}
// SkipASN1 reads and discards an ASN.1 element with the given tag. It
// reports whether the operation was successful.
func (s *String) SkipASN1(tag asn1.Tag) bool {
var unused String
return s.ReadASN1(&unused, tag)
}
// ReadOptionalASN1 attempts to read the contents of a DER-encoded ASN.1
// element (not including tag and length bytes) tagged with the given tag into
// out. It stores whether an element with the tag was found in outPresent,
// unless outPresent is nil. It reports whether the read was successful.
func (s *String) ReadOptionalASN1(out *String, outPresent *bool, tag asn1.Tag) bool {
present := s.PeekASN1Tag(tag)
if outPresent != nil {
*outPresent = present
}
if present && !s.ReadASN1(out, tag) {
return false
}
return true
}
// SkipOptionalASN1 advances s over an ASN.1 element with the given tag, or
// else leaves s unchanged. It reports whether the operation was successful.
func (s *String) SkipOptionalASN1(tag asn1.Tag) bool {
if !s.PeekASN1Tag(tag) {
return true
}
var unused String
return s.ReadASN1(&unused, tag)
}
// ReadOptionalASN1Integer attempts to read an optional ASN.1 INTEGER explicitly
// tagged with tag into out and advances. If no element with a matching tag is
// present, it writes defaultValue into out instead. Otherwise, it behaves like
// ReadASN1Integer.
func (s *String) ReadOptionalASN1Integer(out interface{}, tag asn1.Tag, defaultValue interface{}) bool {
var present bool
var i String
if !s.ReadOptionalASN1(&i, &present, tag) {
return false
}
if !present {
switch out.(type) {
case *int, *int8, *int16, *int32, *int64,
*uint, *uint8, *uint16, *uint32, *uint64, *[]byte:
reflect.ValueOf(out).Elem().Set(reflect.ValueOf(defaultValue))
case *big.Int:
if defaultValue, ok := defaultValue.(*big.Int); ok {
out.(*big.Int).Set(defaultValue)
} else {
panic("out points to big.Int, but defaultValue does not")
}
default:
panic("invalid integer type")
}
return true
}
if !i.ReadASN1Integer(out) || !i.Empty() {
return false
}
return true
}
// ReadOptionalASN1OctetString attempts to read an optional ASN.1 OCTET STRING
// explicitly tagged with tag into out and advances. If no element with a
// matching tag is present, it sets "out" to nil instead. It reports
// whether the read was successful.
func (s *String) ReadOptionalASN1OctetString(out *[]byte, outPresent *bool, tag asn1.Tag) bool {
var present bool
var child String
if !s.ReadOptionalASN1(&child, &present, tag) {
return false
}
if outPresent != nil {
*outPresent = present
}
if present {
var oct String
if !child.ReadASN1(&oct, asn1.OCTET_STRING) || !child.Empty() {
return false
}
*out = oct
} else {
*out = nil
}
return true
}
// ReadOptionalASN1Boolean attempts to read an optional ASN.1 BOOLEAN
// explicitly tagged with tag into out and advances. If no element with a
// matching tag is present, it sets "out" to defaultValue instead. It reports
// whether the read was successful.
func (s *String) ReadOptionalASN1Boolean(out *bool, tag asn1.Tag, defaultValue bool) bool {
var present bool
var child String
if !s.ReadOptionalASN1(&child, &present, tag) {
return false
}
if !present {
*out = defaultValue
return true
}
return child.ReadASN1Boolean(out)
}
func (s *String) readASN1(out *String, outTag *asn1.Tag, skipHeader bool) bool {
if len(*s) < 2 {
return false
}
tag, lenByte := (*s)[0], (*s)[1]
if tag&0x1f == 0x1f {
// ITU-T X.690 section 8.1.2
//
// An identifier octet with a tag part of 0x1f indicates a high-tag-number
// form identifier with two or more octets. We only support tags less than
// 31 (i.e. low-tag-number form, single octet identifier).
return false
}
if outTag != nil {
*outTag = asn1.Tag(tag)
}
// ITU-T X.690 section 8.1.3
//
// Bit 8 of the first length byte indicates whether the length is short- or
// long-form.
var length, headerLen uint32 // length includes headerLen
if lenByte&0x80 == 0 {
// Short-form length (section 8.1.3.4), encoded in bits 1-7.
length = uint32(lenByte) + 2
headerLen = 2
} else {
// Long-form length (section 8.1.3.5). Bits 1-7 encode the number of octets
// used to encode the length.
lenLen := lenByte & 0x7f
var len32 uint32
if lenLen == 0 || lenLen > 4 || len(*s) < int(2+lenLen) {
return false
}
lenBytes := String((*s)[2 : 2+lenLen])
if !lenBytes.readUnsigned(&len32, int(lenLen)) {
return false
}
// ITU-T X.690 section 10.1 (DER length forms) requires encoding the length
// with the minimum number of octets.
if len32 < 128 {
// Length should have used short-form encoding.
return false
}
if len32>>((lenLen-1)*8) == 0 {
// Leading octet is 0. Length should have been at least one byte shorter.
return false
}
headerLen = 2 + uint32(lenLen)
if headerLen+len32 < len32 {
// Overflow.
return false
}
length = headerLen + len32
}
if int(length) < 0 || !s.ReadBytes((*[]byte)(out), int(length)) {
return false
}
if skipHeader && !out.Skip(int(headerLen)) {
panic("cryptobyte: internal error")
}
return true
}
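
For orientation, here is a minimal editorial sketch (not part of the vendored file or of this commit) showing how the Builder and String ASN.1 helpers above compose: build a SEQUENCE containing an INTEGER and an OCTET STRING, then parse it back. All identifiers are the cryptobyte API shown in this diff.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	// Build: AddASN1 reserves a one-byte length which flushChild later fixes up.
	var b cryptobyte.Builder
	b.AddASN1(asn1.SEQUENCE, func(seq *cryptobyte.Builder) {
		seq.AddASN1Int64(42)
		seq.AddASN1OctetString([]byte("hello"))
	})
	der, err := b.Bytes()
	if err != nil {
		panic(err)
	}

	// Parse: ReadASN1 strips the tag and length, leaving the SEQUENCE contents.
	input := cryptobyte.String(der)
	var seq cryptobyte.String
	var n int64
	var payload []byte
	if !input.ReadASN1(&seq, asn1.SEQUENCE) ||
		!seq.ReadASN1Integer(&n) ||
		!seq.ReadASN1Bytes(&payload, asn1.OCTET_STRING) {
		panic("parse failed")
	}
	fmt.Println(n, string(payload)) // 42 hello
}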

vendor/golang.org/x/crypto/cryptobyte/asn1/asn1.go generated vendored Normal file

@ -0,0 +1,46 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package asn1 contains supporting types for parsing and building ASN.1
// messages with the cryptobyte package.
package asn1
// Tag represents an ASN.1 identifier octet, consisting of a tag number
// (indicating a type) and class (such as context-specific or constructed).
//
// Methods in the cryptobyte package only support the low-tag-number form, i.e.
// a single identifier octet with bits 7-8 encoding the class and bits 1-6
// encoding the tag number.
type Tag uint8
const (
classConstructed = 0x20
classContextSpecific = 0x80
)
// Constructed returns t with the constructed class bit set.
func (t Tag) Constructed() Tag { return t | classConstructed }
// ContextSpecific returns t with the context-specific class bit set.
func (t Tag) ContextSpecific() Tag { return t | classContextSpecific }
// The following is a list of standard tag and class combinations.
const (
BOOLEAN = Tag(1)
INTEGER = Tag(2)
BIT_STRING = Tag(3)
OCTET_STRING = Tag(4)
NULL = Tag(5)
OBJECT_IDENTIFIER = Tag(6)
ENUM = Tag(10)
UTF8String = Tag(12)
SEQUENCE = Tag(16 | classConstructed)
SET = Tag(17 | classConstructed)
PrintableString = Tag(19)
T61String = Tag(20)
IA5String = Tag(22)
UTCTime = Tag(23)
GeneralizedTime = Tag(24)
GeneralString = Tag(27)
)
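
A small editorial sketch (not from the diff) of the Tag combinators above: an explicit [0] tag, as used for tagged optional fields, sets both the context-specific and constructed class bits.

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte/asn1"
)

func main() {
	t := asn1.Tag(0).Constructed().ContextSpecific()
	fmt.Printf("%#x\n", uint8(t)) // 0xa0 = 0x80 (context-specific) | 0x20 (constructed)
}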

vendor/golang.org/x/crypto/cryptobyte/builder.go generated vendored Normal file

@ -0,0 +1,350 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package cryptobyte
import (
"errors"
"fmt"
)
// A Builder builds byte strings from fixed-length and length-prefixed values.
// Builders either allocate space as needed, or are fixed, which means that
// they write into a given buffer and produce an error if it's exhausted.
//
// The zero value is a usable Builder that allocates space as needed.
//
// Simple values are marshaled and appended to a Builder using methods on the
// Builder. Length-prefixed values are marshaled by providing a
// BuilderContinuation, which is a function that writes the inner contents of
// the value to a given Builder. See the documentation for BuilderContinuation
// for details.
type Builder struct {
err error
result []byte
fixedSize bool
child *Builder
offset int
pendingLenLen int
pendingIsASN1 bool
inContinuation *bool
}
// NewBuilder creates a Builder that appends its output to the given buffer.
// Like append(), the slice will be reallocated if its capacity is exceeded.
// Use Bytes to get the final buffer.
func NewBuilder(buffer []byte) *Builder {
return &Builder{
result: buffer,
}
}
// NewFixedBuilder creates a Builder that appends its output into the given
// buffer. This builder does not reallocate the output buffer. Writes that
// would exceed the buffer's capacity are treated as an error.
func NewFixedBuilder(buffer []byte) *Builder {
return &Builder{
result: buffer,
fixedSize: true,
}
}
// SetError sets the value to be returned as the error from Bytes. Writes
// performed after calling SetError are ignored.
func (b *Builder) SetError(err error) {
b.err = err
}
// Bytes returns the bytes written by the builder or an error if one has
// occurred during building.
func (b *Builder) Bytes() ([]byte, error) {
if b.err != nil {
return nil, b.err
}
return b.result[b.offset:], nil
}
// BytesOrPanic returns the bytes written by the builder or panics if an error
// has occurred during building.
func (b *Builder) BytesOrPanic() []byte {
if b.err != nil {
panic(b.err)
}
return b.result[b.offset:]
}
// AddUint8 appends an 8-bit value to the byte string.
func (b *Builder) AddUint8(v uint8) {
b.add(byte(v))
}
// AddUint16 appends a big-endian, 16-bit value to the byte string.
func (b *Builder) AddUint16(v uint16) {
b.add(byte(v>>8), byte(v))
}
// AddUint24 appends a big-endian, 24-bit value to the byte string. The highest
// byte of the 32-bit input value is silently truncated.
func (b *Builder) AddUint24(v uint32) {
b.add(byte(v>>16), byte(v>>8), byte(v))
}
// AddUint32 appends a big-endian, 32-bit value to the byte string.
func (b *Builder) AddUint32(v uint32) {
b.add(byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
// AddUint48 appends a big-endian, 48-bit value to the byte string.
func (b *Builder) AddUint48(v uint64) {
b.add(byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
// AddUint64 appends a big-endian, 64-bit value to the byte string.
func (b *Builder) AddUint64(v uint64) {
b.add(byte(v>>56), byte(v>>48), byte(v>>40), byte(v>>32), byte(v>>24), byte(v>>16), byte(v>>8), byte(v))
}
// AddBytes appends a sequence of bytes to the byte string.
func (b *Builder) AddBytes(v []byte) {
b.add(v...)
}
// BuilderContinuation is a continuation-passing interface for building
// length-prefixed byte sequences. Builder methods for length-prefixed
// sequences (AddUint8LengthPrefixed etc) will invoke the BuilderContinuation
// supplied to them. The child builder passed to the continuation can be used
// to build the content of the length-prefixed sequence. For example:
//
// parent := cryptobyte.NewBuilder()
// parent.AddUint8LengthPrefixed(func (child *Builder) {
// child.AddUint8(42)
// child.AddUint8LengthPrefixed(func (grandchild *Builder) {
// grandchild.AddUint8(5)
// })
// })
//
// It is an error to write more bytes to the child than allowed by the reserved
// length prefix. After the continuation returns, the child must be considered
// invalid, i.e. users must not store any copies or references of the child
// that outlive the continuation.
//
// If the continuation panics with a value of type BuildError then the inner
// error will be returned as the error from Bytes. If the child panics
// otherwise then Bytes will repanic with the same value.
type BuilderContinuation func(child *Builder)
// BuildError wraps an error. If a BuilderContinuation panics with this value,
// the panic will be recovered and the inner error will be returned from
// Builder.Bytes.
type BuildError struct {
Err error
}
// AddUint8LengthPrefixed adds an 8-bit length-prefixed byte sequence.
func (b *Builder) AddUint8LengthPrefixed(f BuilderContinuation) {
b.addLengthPrefixed(1, false, f)
}
// AddUint16LengthPrefixed adds a big-endian, 16-bit length-prefixed byte sequence.
func (b *Builder) AddUint16LengthPrefixed(f BuilderContinuation) {
b.addLengthPrefixed(2, false, f)
}
// AddUint24LengthPrefixed adds a big-endian, 24-bit length-prefixed byte sequence.
func (b *Builder) AddUint24LengthPrefixed(f BuilderContinuation) {
b.addLengthPrefixed(3, false, f)
}
// AddUint32LengthPrefixed adds a big-endian, 32-bit length-prefixed byte sequence.
func (b *Builder) AddUint32LengthPrefixed(f BuilderContinuation) {
b.addLengthPrefixed(4, false, f)
}
func (b *Builder) callContinuation(f BuilderContinuation, arg *Builder) {
if !*b.inContinuation {
*b.inContinuation = true
defer func() {
*b.inContinuation = false
r := recover()
if r == nil {
return
}
if buildError, ok := r.(BuildError); ok {
b.err = buildError.Err
} else {
panic(r)
}
}()
}
f(arg)
}
func (b *Builder) addLengthPrefixed(lenLen int, isASN1 bool, f BuilderContinuation) {
// Subsequent writes can be ignored if the builder has encountered an error.
if b.err != nil {
return
}
offset := len(b.result)
b.add(make([]byte, lenLen)...)
if b.inContinuation == nil {
b.inContinuation = new(bool)
}
b.child = &Builder{
result: b.result,
fixedSize: b.fixedSize,
offset: offset,
pendingLenLen: lenLen,
pendingIsASN1: isASN1,
inContinuation: b.inContinuation,
}
b.callContinuation(f, b.child)
b.flushChild()
if b.child != nil {
panic("cryptobyte: internal error")
}
}
func (b *Builder) flushChild() {
if b.child == nil {
return
}
b.child.flushChild()
child := b.child
b.child = nil
if child.err != nil {
b.err = child.err
return
}
length := len(child.result) - child.pendingLenLen - child.offset
if length < 0 {
panic("cryptobyte: internal error") // result unexpectedly shrunk
}
if child.pendingIsASN1 {
// For ASN.1, we reserved a single byte for the length. If that turned out
// to be incorrect, we have to move the contents along in order to make
// space.
if child.pendingLenLen != 1 {
panic("cryptobyte: internal error")
}
var lenLen, lenByte uint8
if int64(length) > 0xfffffffe {
b.err = errors.New("pending ASN.1 child too long")
return
} else if length > 0xffffff {
lenLen = 5
lenByte = 0x80 | 4
} else if length > 0xffff {
lenLen = 4
lenByte = 0x80 | 3
} else if length > 0xff {
lenLen = 3
lenByte = 0x80 | 2
} else if length > 0x7f {
lenLen = 2
lenByte = 0x80 | 1
} else {
lenLen = 1
lenByte = uint8(length)
length = 0
}
// Insert the initial length byte, make space for successive length bytes,
// and adjust the offset.
child.result[child.offset] = lenByte
extraBytes := int(lenLen - 1)
if extraBytes != 0 {
child.add(make([]byte, extraBytes)...)
childStart := child.offset + child.pendingLenLen
copy(child.result[childStart+extraBytes:], child.result[childStart:])
}
child.offset++
child.pendingLenLen = extraBytes
}
l := length
for i := child.pendingLenLen - 1; i >= 0; i-- {
child.result[child.offset+i] = uint8(l)
l >>= 8
}
if l != 0 {
b.err = fmt.Errorf("cryptobyte: pending child length %d exceeds %d-byte length prefix", length, child.pendingLenLen)
return
}
if b.fixedSize && &b.result[0] != &child.result[0] {
panic("cryptobyte: BuilderContinuation reallocated a fixed-size buffer")
}
b.result = child.result
}
func (b *Builder) add(bytes ...byte) {
if b.err != nil {
return
}
if b.child != nil {
panic("cryptobyte: attempted write while child is pending")
}
if len(b.result)+len(bytes) < len(bytes) {
b.err = errors.New("cryptobyte: length overflow")
}
if b.fixedSize && len(b.result)+len(bytes) > cap(b.result) {
b.err = errors.New("cryptobyte: Builder is exceeding its fixed-size buffer")
return
}
b.result = append(b.result, bytes...)
}
// Unwrite rolls back non-negative n bytes written directly to the Builder.
// An attempt by a child builder passed to a continuation to unwrite bytes
// from its parent will panic.
func (b *Builder) Unwrite(n int) {
if b.err != nil {
return
}
if b.child != nil {
panic("cryptobyte: attempted unwrite while child is pending")
}
length := len(b.result) - b.pendingLenLen - b.offset
if length < 0 {
panic("cryptobyte: internal error")
}
if n < 0 {
panic("cryptobyte: attempted to unwrite negative number of bytes")
}
if n > length {
panic("cryptobyte: attempted to unwrite more than was written")
}
b.result = b.result[:len(b.result)-n]
}
// A MarshalingValue marshals itself into a Builder.
type MarshalingValue interface {
// Marshal is called by Builder.AddValue. It receives a pointer to a builder
// to marshal itself into. It may return an error that occurred during
// marshaling, such as unset or invalid values.
Marshal(b *Builder) error
}
// AddValue calls Marshal on v, passing a pointer to the builder to append to.
// If Marshal returns an error, it is set on the Builder so that subsequent
// appends don't have an effect.
func (b *Builder) AddValue(v MarshalingValue) {
err := v.Marshal(b)
if err != nil {
b.err = err
}
}
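
An editorial sketch (not part of the file) of the BuildError mechanism documented above: panicking with BuildError inside a continuation is recovered by callContinuation and surfaces as the error from Bytes.

package main

import (
	"errors"
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	var b cryptobyte.Builder
	b.AddUint8LengthPrefixed(func(child *cryptobyte.Builder) {
		// Abort the continuation; the parent records the wrapped error.
		panic(cryptobyte.BuildError{Err: errors.New("value out of range")})
	})
	if _, err := b.Bytes(); err != nil {
		fmt.Println(err) // value out of range
	}
}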

vendor/golang.org/x/crypto/cryptobyte/string.go generated vendored Normal file

@ -0,0 +1,183 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package cryptobyte contains types that help with parsing and constructing
// length-prefixed, binary messages, including ASN.1 DER. (The asn1 subpackage
// contains useful ASN.1 constants.)
//
// The String type is for parsing. It wraps a []byte slice and provides helper
// functions for consuming structures, value by value.
//
// The Builder type is for constructing messages. It provides helper functions
// for appending values and also for appending length-prefixed submessages
// without having to worry about calculating the length prefix ahead of time.
//
// See the documentation and examples for the Builder and String types to get
// started.
package cryptobyte
// String represents a string of bytes. It provides methods for parsing
// fixed-length and length-prefixed values from it.
type String []byte
// read advances a String by n bytes and returns them. If less than n bytes
// remain, it returns nil.
func (s *String) read(n int) []byte {
if len(*s) < n || n < 0 {
return nil
}
v := (*s)[:n]
*s = (*s)[n:]
return v
}
// Skip advances the String by n bytes and reports whether it was successful.
func (s *String) Skip(n int) bool {
return s.read(n) != nil
}
// ReadUint8 decodes an 8-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint8(out *uint8) bool {
v := s.read(1)
if v == nil {
return false
}
*out = uint8(v[0])
return true
}
// ReadUint16 decodes a big-endian, 16-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint16(out *uint16) bool {
v := s.read(2)
if v == nil {
return false
}
*out = uint16(v[0])<<8 | uint16(v[1])
return true
}
// ReadUint24 decodes a big-endian, 24-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint24(out *uint32) bool {
v := s.read(3)
if v == nil {
return false
}
*out = uint32(v[0])<<16 | uint32(v[1])<<8 | uint32(v[2])
return true
}
// ReadUint32 decodes a big-endian, 32-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint32(out *uint32) bool {
v := s.read(4)
if v == nil {
return false
}
*out = uint32(v[0])<<24 | uint32(v[1])<<16 | uint32(v[2])<<8 | uint32(v[3])
return true
}
// ReadUint48 decodes a big-endian, 48-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint48(out *uint64) bool {
v := s.read(6)
if v == nil {
return false
}
*out = uint64(v[0])<<40 | uint64(v[1])<<32 | uint64(v[2])<<24 | uint64(v[3])<<16 | uint64(v[4])<<8 | uint64(v[5])
return true
}
// ReadUint64 decodes a big-endian, 64-bit value into out and advances over it.
// It reports whether the read was successful.
func (s *String) ReadUint64(out *uint64) bool {
v := s.read(8)
if v == nil {
return false
}
*out = uint64(v[0])<<56 | uint64(v[1])<<48 | uint64(v[2])<<40 | uint64(v[3])<<32 | uint64(v[4])<<24 | uint64(v[5])<<16 | uint64(v[6])<<8 | uint64(v[7])
return true
}
func (s *String) readUnsigned(out *uint32, length int) bool {
v := s.read(length)
if v == nil {
return false
}
var result uint32
for i := 0; i < length; i++ {
result <<= 8
result |= uint32(v[i])
}
*out = result
return true
}
func (s *String) readLengthPrefixed(lenLen int, outChild *String) bool {
lenBytes := s.read(lenLen)
if lenBytes == nil {
return false
}
var length uint32
for _, b := range lenBytes {
length = length << 8
length = length | uint32(b)
}
v := s.read(int(length))
if v == nil {
return false
}
*outChild = v
return true
}
// ReadUint8LengthPrefixed reads the content of an 8-bit length-prefixed value
// into out and advances over it. It reports whether the read was successful.
func (s *String) ReadUint8LengthPrefixed(out *String) bool {
return s.readLengthPrefixed(1, out)
}
// ReadUint16LengthPrefixed reads the content of a big-endian, 16-bit
// length-prefixed value into out and advances over it. It reports whether the
// read was successful.
func (s *String) ReadUint16LengthPrefixed(out *String) bool {
return s.readLengthPrefixed(2, out)
}
// ReadUint24LengthPrefixed reads the content of a big-endian, 24-bit
// length-prefixed value into out and advances over it. It reports whether
// the read was successful.
func (s *String) ReadUint24LengthPrefixed(out *String) bool {
return s.readLengthPrefixed(3, out)
}
// ReadBytes reads n bytes into out and advances over them. It reports
// whether the read was successful.
func (s *String) ReadBytes(out *[]byte, n int) bool {
v := s.read(n)
if v == nil {
return false
}
*out = v
return true
}
// CopyBytes copies len(out) bytes into out and advances over them. It reports
// whether the copy operation was successful.
func (s *String) CopyBytes(out []byte) bool {
n := len(out)
v := s.read(n)
if v == nil {
return false
}
return copy(out, v) == n
}
// Empty reports whether the string does not contain any bytes.
func (s String) Empty() bool {
return len(s) == 0
}
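
To round this off, an editorial sketch (not from the diff) of the plain length-prefixed helpers, which is how Builder and String are typically combined for TLS-style messages:

package main

import (
	"fmt"

	"golang.org/x/crypto/cryptobyte"
)

func main() {
	// Build a record: a one-byte type followed by a 16-bit length-prefixed body.
	var b cryptobyte.Builder
	b.AddUint8(1) // record type (illustrative value)
	b.AddUint16LengthPrefixed(func(body *cryptobyte.Builder) {
		body.AddBytes([]byte("payload"))
	})
	msg := b.BytesOrPanic()

	// Read it back; Empty confirms nothing trails the record.
	s := cryptobyte.String(msg)
	var typ uint8
	var body cryptobyte.String
	if !s.ReadUint8(&typ) || !s.ReadUint16LengthPrefixed(&body) || !s.Empty() {
		panic("malformed record")
	}
	fmt.Println(typ, string(body)) // 1 payload
}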


@ -25,6 +25,11 @@ const debugHandshake = false
// quickly.
const chanSize = 16
// maxPendingPackets sets the maximum number of packets to queue while waiting
// for KEX to complete. This limits the total pending data to maxPendingPackets
// * maxPacket bytes, which is ~16.8MB.
const maxPendingPackets = 64
// keyingTransport is a packet based transport that supports key
// changes. It need not be thread-safe. It should pass through
// msgNewKeys in both directions.
@ -73,13 +78,22 @@ type handshakeTransport struct {
incoming chan []byte
readError error
mu sync.Mutex
writeError error
sentInitPacket []byte
sentInitMsg *kexInitMsg
pendingPackets [][]byte // Used when a key exchange is in progress.
mu sync.Mutex
// Condition for the above mutex. It is used to notify a completed key
// exchange or a write failure. Writes can wait for this condition while a
// key exchange is in progress.
writeCond *sync.Cond
writeError error
sentInitPacket []byte
sentInitMsg *kexInitMsg
// Used to queue writes when a key exchange is in progress. The length is
// limited by pendingPacketsSize. Once full, writes will block until the key
// exchange is completed or an error occurs. If not empty, it is emptied
// all at once when the key exchange is completed in kexLoop.
pendingPackets [][]byte
writePacketsLeft uint32
writeBytesLeft int64
userAuthComplete bool // whether the user authentication phase is complete
// If the read loop wants to schedule a kex, it pings this
// channel, and the write loop will send out a kex
@ -133,6 +147,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion,
config: config,
}
t.writeCond = sync.NewCond(&t.mu)
t.resetReadThresholds()
t.resetWriteThresholds()
@ -259,6 +274,7 @@ func (t *handshakeTransport) recordWriteError(err error) {
defer t.mu.Unlock()
if t.writeError == nil && err != nil {
t.writeError = err
t.writeCond.Broadcast()
}
}
@ -362,6 +378,8 @@ write:
}
}
t.pendingPackets = t.pendingPackets[:0]
// Unblock writePacket if waiting for KEX.
t.writeCond.Broadcast()
t.mu.Unlock()
}
@ -552,26 +570,44 @@ func (t *handshakeTransport) sendKexInit() error {
return nil
}
var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase")
func (t *handshakeTransport) writePacket(p []byte) error {
t.mu.Lock()
defer t.mu.Unlock()
switch p[0] {
case msgKexInit:
return errors.New("ssh: only handshakeTransport can send kexInit")
case msgNewKeys:
return errors.New("ssh: only handshakeTransport can send newKeys")
case msgUserAuthBanner:
if t.userAuthComplete {
return errSendBannerPhase
}
case msgUserAuthSuccess:
t.userAuthComplete = true
}
t.mu.Lock()
defer t.mu.Unlock()
if t.writeError != nil {
return t.writeError
}
if t.sentInitMsg != nil {
// Copy the packet so the writer can reuse the buffer.
cp := make([]byte, len(p))
copy(cp, p)
t.pendingPackets = append(t.pendingPackets, cp)
return nil
if len(t.pendingPackets) < maxPendingPackets {
// Copy the packet so the writer can reuse the buffer.
cp := make([]byte, len(p))
copy(cp, p)
t.pendingPackets = append(t.pendingPackets, cp)
return nil
}
for t.sentInitMsg != nil {
// Block and wait for KEX to complete or an error.
t.writeCond.Wait()
if t.writeError != nil {
return t.writeError
}
}
}
if t.writeBytesLeft > 0 {
@ -588,6 +624,7 @@ func (t *handshakeTransport) writePacket(p []byte) error {
if err := t.pushPacket(p); err != nil {
t.writeError = err
t.writeCond.Broadcast()
}
return nil
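
The hunk above bounds the write queue during key exchange: up to maxPendingPackets packets are buffered, and further writers block on writeCond until kexLoop broadcasts completion or an error. An editorial, self-contained sketch of that pattern (illustrative names, not the actual handshakeTransport fields):

package example

import (
	"fmt"
	"sync"
)

type queuedWriter struct {
	mu        sync.Mutex
	cond      *sync.Cond // signalled when the key exchange finishes or fails
	kexActive bool
	pending   [][]byte
	err       error
	maxQueued int
}

func newQueuedWriter(max int) *queuedWriter {
	w := &queuedWriter{maxQueued: max, kexActive: true}
	w.cond = sync.NewCond(&w.mu)
	return w
}

func (w *queuedWriter) writePacket(p []byte) error {
	w.mu.Lock()
	defer w.mu.Unlock()
	for w.kexActive {
		if w.err != nil {
			return w.err
		}
		if len(w.pending) < w.maxQueued {
			cp := append([]byte(nil), p...) // copy so the caller may reuse p
			w.pending = append(w.pending, cp)
			return nil
		}
		w.cond.Wait() // queue full: block until kexDone broadcasts
	}
	if w.err != nil {
		return w.err
	}
	// ... write p directly to the underlying transport ...
	return nil
}

func (w *queuedWriter) kexDone(err error) {
	w.mu.Lock()
	defer w.mu.Unlock()
	w.kexActive = false
	if err != nil && w.err == nil {
		w.err = fmt.Errorf("key exchange failed: %w", err)
	}
	// ... on success, flush w.pending to the transport ...
	w.pending = w.pending[:0]
	w.cond.Broadcast() // unblock writers waiting in writePacket
}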


@ -818,6 +818,8 @@ func decode(packet []byte) (interface{}, error) {
return new(userAuthSuccessMsg), nil
case msgUserAuthFailure:
msg = new(userAuthFailureMsg)
case msgUserAuthBanner:
msg = new(userAuthBannerMsg)
case msgUserAuthPubKeyOk:
msg = new(userAuthPubKeyOkMsg)
case msgGlobalRequest:


@ -59,6 +59,27 @@ type GSSAPIWithMICConfig struct {
Server GSSAPIServer
}
// SendAuthBanner implements [ServerPreAuthConn].
func (s *connection) SendAuthBanner(msg string) error {
return s.transport.writePacket(Marshal(&userAuthBannerMsg{
Message: msg,
}))
}
func (*connection) unexportedMethodForFutureProofing() {}
// ServerPreAuthConn is the interface available on an incoming server
// connection before authentication has completed.
type ServerPreAuthConn interface {
unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, ala testing.TB
ConnMetadata
// SendAuthBanner sends a banner message to the client.
// It returns an error once the authentication phase has ended.
SendAuthBanner(string) error
}
// ServerConfig holds server specific configuration data.
type ServerConfig struct {
// Config contains configuration shared between client and server.
@ -118,6 +139,12 @@ type ServerConfig struct {
// attempts.
AuthLogCallback func(conn ConnMetadata, method string, err error)
// PreAuthConnCallback, if non-nil, is called upon receiving a new connection
// before any authentication has started. The provided ServerPreAuthConn
// can be used at any time before authentication is complete, including
// after this callback has returned.
PreAuthConnCallback func(ServerPreAuthConn)
// ServerVersion is the version identification string to announce in
// the public handshake.
// If empty, a reasonable default is used.
@ -488,6 +515,10 @@ func (b *BannerError) Error() string {
}
func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
if config.PreAuthConnCallback != nil {
config.PreAuthConnCallback(s)
}
sessionID := s.transport.getSessionID()
var cache pubKeyCache
var perms *Permissions
@ -495,7 +526,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err
authFailures := 0
noneAuthCount := 0
var authErrs []error
var displayedBanner bool
var calledBannerCallback bool
partialSuccessReturned := false
// Set the initial authentication callbacks from the config. They can be
// changed if a PartialSuccessError is returned.
@ -542,14 +573,10 @@ userAuthLoop:
s.user = userAuthReq.User
if !displayedBanner && config.BannerCallback != nil {
displayedBanner = true
msg := config.BannerCallback(s)
if msg != "" {
bannerMsg := &userAuthBannerMsg{
Message: msg,
}
if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil {
if !calledBannerCallback && config.BannerCallback != nil {
calledBannerCallback = true
if msg := config.BannerCallback(s); msg != "" {
if err := s.SendAuthBanner(msg); err != nil {
return nil, err
}
}
@ -762,10 +789,7 @@ userAuthLoop:
var bannerErr *BannerError
if errors.As(authErr, &bannerErr) {
if bannerErr.Message != "" {
bannerMsg := &userAuthBannerMsg{
Message: bannerErr.Message,
}
if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil {
if err := s.SendAuthBanner(bannerErr.Message); err != nil {
return nil, err
}
}
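
The new ServerPreAuthConn hook gives servers a pre-authentication handle on the connection. A hedged editorial sketch of how a ServerConfig might wire it up to push a banner before the client authenticates (other configuration fields elided):

package example

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func serverConfig() *ssh.ServerConfig {
	cfg := &ssh.ServerConfig{
		PreAuthConnCallback: func(conn ssh.ServerPreAuthConn) {
			// SendAuthBanner may be called until the authentication phase ends.
			if err := conn.SendAuthBanner("authorized use only\n"); err != nil {
				log.Printf("auth banner: %v", err)
			}
		},
	}
	// ... set host keys and authentication callbacks as usual ...
	return cfg
}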


@ -459,7 +459,7 @@ func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel
return nil, err
}
go DiscardRequests(in)
return ch, err
return ch, nil
}
type tcpChan struct {


@ -10,16 +10,13 @@ import (
"slices"
)
// TODO(adonovan): when https://go.dev/issue/32816 is accepted, all of
// these functions should be annotated (provisionally with "//go:fix
// inline") so that tools can safely and automatically replace calls
// to exp/slices with calls to std slices by inlining them.
// Equal reports whether two slices are equal: the same length and all
// elements equal. If the lengths are different, Equal returns false.
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
//
//go:fix inline
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
return slices.Equal(s1, s2)
}
@ -29,6 +26,8 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool {
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
//
//go:fix inline
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
return slices.EqualFunc(s1, s2, eq)
}
@ -40,6 +39,8 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
//
//go:fix inline
func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int {
return slices.Compare(s1, s2)
}
@ -49,29 +50,39 @@ func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int {
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
//
//go:fix inline
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
return slices.CompareFunc(s1, s2, cmp)
}
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
//
//go:fix inline
func Index[S ~[]E, E comparable](s S, v E) int {
return slices.Index(s, v)
}
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
//
//go:fix inline
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
return slices.IndexFunc(s, f)
}
// Contains reports whether v is present in s.
//
//go:fix inline
func Contains[S ~[]E, E comparable](s S, v E) bool {
return slices.Contains(s, v)
}
// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
//
//go:fix inline
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
return slices.ContainsFunc(s, f)
}
@ -83,6 +94,8 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
// and r[i+len(v)] == value originally at r[i].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
//
//go:fix inline
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
return slices.Insert(s, i, v...)
}
@ -92,6 +105,8 @@ func Insert[S ~[]E, E any](s S, i int, v ...E) S {
// Delete is O(len(s)-i), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete zeroes the elements s[len(s)-(j-i):len(s)].
//
//go:fix inline
func Delete[S ~[]E, E any](s S, i, j int) S {
return slices.Delete(s, i, j)
}
@ -99,6 +114,8 @@ func Delete[S ~[]E, E any](s S, i, j int) S {
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// DeleteFunc zeroes the elements between the new length and the original length.
//
//go:fix inline
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
return slices.DeleteFunc(s, del)
}
@ -106,12 +123,16 @@ func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
//
//go:fix inline
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
return slices.Replace(s, i, j, v...)
}
// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
//
//go:fix inline
func Clone[S ~[]E, E any](s S) S {
return slices.Clone(s)
}
@ -121,6 +142,8 @@ func Clone[S ~[]E, E any](s S) S {
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// Compact zeroes the elements between the new length and the original length.
//
//go:fix inline
func Compact[S ~[]E, E comparable](s S) S {
return slices.Compact(s)
}
@ -128,6 +151,8 @@ func Compact[S ~[]E, E comparable](s S) S {
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
// CompactFunc zeroes the elements between the new length and the original length.
//
//go:fix inline
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
return slices.CompactFunc(s, eq)
}
@ -136,16 +161,22 @@ func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
// another n elements. After Grow(n), at least n elements can be appended
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
//
//go:fix inline
func Grow[S ~[]E, E any](s S, n int) S {
return slices.Grow(s, n)
}
// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
//
//go:fix inline
func Clip[S ~[]E, E any](s S) S {
return slices.Clip(s)
}
// Reverse reverses the elements of the slice in place.
//
//go:fix inline
func Reverse[S ~[]E, E any](s S) {
slices.Reverse(s)
}


@ -9,11 +9,10 @@ import (
"slices"
)
// TODO(adonovan): add a "//go:fix inline" annotation to each function
// in this file; see https://go.dev/issue/32816.
// Sort sorts a slice of any ordered type in ascending order.
// When sorting floating-point numbers, NaNs are ordered before other values.
//
//go:fix inline
func Sort[S ~[]E, E cmp.Ordered](x S) {
slices.Sort(x)
}
@ -27,23 +26,31 @@ func Sort[S ~[]E, E cmp.Ordered](x S) {
// SortFunc requires that cmp is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
// To indicate 'uncomparable', return 0 from the function.
//
//go:fix inline
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
slices.SortFunc(x, cmp)
}
// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using cmp to compare elements in the same way as [SortFunc].
//
//go:fix inline
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
slices.SortStableFunc(x, cmp)
}
// IsSorted reports whether x is sorted in ascending order.
//
//go:fix inline
func IsSorted[S ~[]E, E cmp.Ordered](x S) bool {
return slices.IsSorted(x)
}
// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
// comparison function as defined by [SortFunc].
//
//go:fix inline
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
return slices.IsSortedFunc(x, cmp)
}
@ -51,6 +58,8 @@ func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
// Min returns the minimal value in x. It panics if x is empty.
// For floating-point numbers, Min propagates NaNs (any NaN value in x
// forces the output to be NaN).
//
//go:fix inline
func Min[S ~[]E, E cmp.Ordered](x S) E {
return slices.Min(x)
}
@ -58,6 +67,8 @@ func Min[S ~[]E, E cmp.Ordered](x S) E {
// MinFunc returns the minimal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one minimal element
// according to the cmp function, MinFunc returns the first one.
//
//go:fix inline
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
return slices.MinFunc(x, cmp)
}
@ -65,6 +76,8 @@ func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
// Max returns the maximal value in x. It panics if x is empty.
// For floating-point E, Max propagates NaNs (any NaN value in x
// forces the output to be NaN).
//
//go:fix inline
func Max[S ~[]E, E cmp.Ordered](x S) E {
return slices.Max(x)
}
@ -72,6 +85,8 @@ func Max[S ~[]E, E cmp.Ordered](x S) E {
// MaxFunc returns the maximal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one maximal element
// according to the cmp function, MaxFunc returns the first one.
//
//go:fix inline
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
return slices.MaxFunc(x, cmp)
}
@ -80,6 +95,8 @@ func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
//
//go:fix inline
func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) {
return slices.BinarySearch(x, target)
}
@ -91,6 +108,8 @@ func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) {
// or a positive number if the slice element follows the target.
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
//
//go:fix inline
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
return slices.BinarySearchFunc(x, target, cmp)
}
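
The //go:fix inline annotations added throughout these wrappers let directive-aware tooling (such as the gofix analyzer shipped with gopls/x/tools) rewrite exp/slices calls into their standard-library equivalents. An editorial sketch of the equivalence, assuming nothing beyond what the wrappers above forward to:

package example

import (
	stdslices "slices"

	expslices "golang.org/x/exp/slices"
)

// containsBoth calls the exp wrapper and the std function it forwards to;
// both return the same result.
func containsBoth(xs []int, v int) (bool, bool) {
	return expslices.Contains(xs, v), stdslices.Contains(xs, v)
}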

vendor/golang.org/x/mod/LICENSE generated vendored

@ -1,27 +0,0 @@
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

vendor/golang.org/x/mod/PATENTS generated vendored

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@ -1,401 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package semver implements comparison of semantic version strings.
// In this package, semantic version strings must begin with a leading "v",
// as in "v1.0.0".
//
// The general form of a semantic version string accepted by this package is
//
// vMAJOR[.MINOR[.PATCH[-PRERELEASE][+BUILD]]]
//
// where square brackets indicate optional parts of the syntax;
// MAJOR, MINOR, and PATCH are decimal integers without extra leading zeros;
// PRERELEASE and BUILD are each a series of non-empty dot-separated identifiers
// using only alphanumeric characters and hyphens; and
// all-numeric PRERELEASE identifiers must not have leading zeros.
//
// This package follows Semantic Versioning 2.0.0 (see semver.org)
// with two exceptions. First, it requires the "v" prefix. Second, it recognizes
// vMAJOR and vMAJOR.MINOR (with no prerelease or build suffixes)
// as shorthands for vMAJOR.0.0 and vMAJOR.MINOR.0.
package semver
import "sort"
// parsed returns the parsed form of a semantic version string.
type parsed struct {
major string
minor string
patch string
short string
prerelease string
build string
}
// IsValid reports whether v is a valid semantic version string.
func IsValid(v string) bool {
_, ok := parse(v)
return ok
}
// Canonical returns the canonical formatting of the semantic version v.
// It fills in any missing .MINOR or .PATCH and discards build metadata.
// Two semantic versions compare equal only if their canonical formattings
// are identical strings.
// The canonical invalid semantic version is the empty string.
func Canonical(v string) string {
p, ok := parse(v)
if !ok {
return ""
}
if p.build != "" {
return v[:len(v)-len(p.build)]
}
if p.short != "" {
return v + p.short
}
return v
}
// Major returns the major version prefix of the semantic version v.
// For example, Major("v2.1.0") == "v2".
// If v is an invalid semantic version string, Major returns the empty string.
func Major(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return v[:1+len(pv.major)]
}
// MajorMinor returns the major.minor version prefix of the semantic version v.
// For example, MajorMinor("v2.1.0") == "v2.1".
// If v is an invalid semantic version string, MajorMinor returns the empty string.
func MajorMinor(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
i := 1 + len(pv.major)
if j := i + 1 + len(pv.minor); j <= len(v) && v[i] == '.' && v[i+1:j] == pv.minor {
return v[:j]
}
return v[:i] + "." + pv.minor
}
// Prerelease returns the prerelease suffix of the semantic version v.
// For example, Prerelease("v2.1.0-pre+meta") == "-pre".
// If v is an invalid semantic version string, Prerelease returns the empty string.
func Prerelease(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.prerelease
}
// Build returns the build suffix of the semantic version v.
// For example, Build("v2.1.0+meta") == "+meta".
// If v is an invalid semantic version string, Build returns the empty string.
func Build(v string) string {
pv, ok := parse(v)
if !ok {
return ""
}
return pv.build
}
// Compare returns an integer comparing two versions according to
// semantic version precedence.
// The result will be 0 if v == w, -1 if v < w, or +1 if v > w.
//
// An invalid semantic version string is considered less than a valid one.
// All invalid semantic version strings compare equal to each other.
func Compare(v, w string) int {
pv, ok1 := parse(v)
pw, ok2 := parse(w)
if !ok1 && !ok2 {
return 0
}
if !ok1 {
return -1
}
if !ok2 {
return +1
}
if c := compareInt(pv.major, pw.major); c != 0 {
return c
}
if c := compareInt(pv.minor, pw.minor); c != 0 {
return c
}
if c := compareInt(pv.patch, pw.patch); c != 0 {
return c
}
return comparePrerelease(pv.prerelease, pw.prerelease)
}
// Max canonicalizes its arguments and then returns the version string
// that compares greater.
//
// Deprecated: use [Compare] instead. In most cases, returning a canonicalized
// version is not expected or desired.
func Max(v, w string) string {
v = Canonical(v)
w = Canonical(w)
if Compare(v, w) > 0 {
return v
}
return w
}
// ByVersion implements [sort.Interface] for sorting semantic version strings.
type ByVersion []string
func (vs ByVersion) Len() int { return len(vs) }
func (vs ByVersion) Swap(i, j int) { vs[i], vs[j] = vs[j], vs[i] }
func (vs ByVersion) Less(i, j int) bool {
cmp := Compare(vs[i], vs[j])
if cmp != 0 {
return cmp < 0
}
return vs[i] < vs[j]
}
// Sort sorts a list of semantic version strings using [ByVersion].
func Sort(list []string) {
sort.Sort(ByVersion(list))
}
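A small sketch of the exported API documented above (Compare, Canonical, Sort); the version strings are illustrative and the import path is the upstream golang.org/x/mod/semver:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	fmt.Println(semver.Compare("v1.2.0-alpha", "v1.2.0")) // -1: a prerelease sorts before its release
	fmt.Println(semver.Compare("v1.2", "v1.2.0"))         // 0: v1.2 is shorthand for v1.2.0
	fmt.Println(semver.Compare("banana", "v0.1.0"))       // -1: invalid versions sort first
	fmt.Println(semver.Canonical("v1.2"))                 // v1.2.0
	fmt.Println(semver.Canonical("v1.2.3+meta"))          // v1.2.3 (build metadata discarded)

	vs := []string{"v1.10.0", "v1.2.0", "v1.2.0-rc.1"}
	semver.Sort(vs)
	fmt.Println(vs) // [v1.2.0-rc.1 v1.2.0 v1.10.0]
}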
func parse(v string) (p parsed, ok bool) {
if v == "" || v[0] != 'v' {
return
}
p.major, v, ok = parseInt(v[1:])
if !ok {
return
}
if v == "" {
p.minor = "0"
p.patch = "0"
p.short = ".0.0"
return
}
if v[0] != '.' {
ok = false
return
}
p.minor, v, ok = parseInt(v[1:])
if !ok {
return
}
if v == "" {
p.patch = "0"
p.short = ".0"
return
}
if v[0] != '.' {
ok = false
return
}
p.patch, v, ok = parseInt(v[1:])
if !ok {
return
}
if len(v) > 0 && v[0] == '-' {
p.prerelease, v, ok = parsePrerelease(v)
if !ok {
return
}
}
if len(v) > 0 && v[0] == '+' {
p.build, v, ok = parseBuild(v)
if !ok {
return
}
}
if v != "" {
ok = false
return
}
ok = true
return
}
func parseInt(v string) (t, rest string, ok bool) {
if v == "" {
return
}
if v[0] < '0' || '9' < v[0] {
return
}
i := 1
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
if v[0] == '0' && i != 1 {
return
}
return v[:i], v[i:], true
}
func parsePrerelease(v string) (t, rest string, ok bool) {
// "A pre-release version MAY be denoted by appending a hyphen and
// a series of dot separated identifiers immediately following the patch version.
// Identifiers MUST comprise only ASCII alphanumerics and hyphen [0-9A-Za-z-].
// Identifiers MUST NOT be empty. Numeric identifiers MUST NOT include leading zeroes."
if v == "" || v[0] != '-' {
return
}
i := 1
start := 1
for i < len(v) && v[i] != '+' {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i || isBadNum(v[start:i]) {
return
}
start = i + 1
}
i++
}
if start == i || isBadNum(v[start:i]) {
return
}
return v[:i], v[i:], true
}
func parseBuild(v string) (t, rest string, ok bool) {
if v == "" || v[0] != '+' {
return
}
i := 1
start := 1
for i < len(v) {
if !isIdentChar(v[i]) && v[i] != '.' {
return
}
if v[i] == '.' {
if start == i {
return
}
start = i + 1
}
i++
}
if start == i {
return
}
return v[:i], v[i:], true
}
func isIdentChar(c byte) bool {
return 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' || c == '-'
}
func isBadNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v) && i > 1 && v[0] == '0'
}
func isNum(v string) bool {
i := 0
for i < len(v) && '0' <= v[i] && v[i] <= '9' {
i++
}
return i == len(v)
}
func compareInt(x, y string) int {
if x == y {
return 0
}
if len(x) < len(y) {
return -1
}
if len(x) > len(y) {
return +1
}
if x < y {
return -1
} else {
return +1
}
}
func comparePrerelease(x, y string) int {
// "When major, minor, and patch are equal, a pre-release version has
// lower precedence than a normal version.
// Example: 1.0.0-alpha < 1.0.0.
// Precedence for two pre-release versions with the same major, minor,
// and patch version MUST be determined by comparing each dot separated
// identifier from left to right until a difference is found as follows:
// identifiers consisting of only digits are compared numerically and
// identifiers with letters or hyphens are compared lexically in ASCII
// sort order. Numeric identifiers always have lower precedence than
// non-numeric identifiers. A larger set of pre-release fields has a
// higher precedence than a smaller set, if all of the preceding
// identifiers are equal.
// Example: 1.0.0-alpha < 1.0.0-alpha.1 < 1.0.0-alpha.beta <
// 1.0.0-beta < 1.0.0-beta.2 < 1.0.0-beta.11 < 1.0.0-rc.1 < 1.0.0."
if x == y {
return 0
}
if x == "" {
return +1
}
if y == "" {
return -1
}
for x != "" && y != "" {
x = x[1:] // skip - or .
y = y[1:] // skip - or .
var dx, dy string
dx, x = nextIdent(x)
dy, y = nextIdent(y)
if dx != dy {
ix := isNum(dx)
iy := isNum(dy)
if ix != iy {
if ix {
return -1
} else {
return +1
}
}
if ix {
if len(dx) < len(dy) {
return -1
}
if len(dx) > len(dy) {
return +1
}
}
if dx < dy {
return -1
} else {
return +1
}
}
}
if x == "" {
return -1
} else {
return +1
}
}
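The precedence chain quoted from the spec above can be checked against the exported API; note that this package additionally requires the leading "v". A short illustrative snippet:

package main

import (
	"fmt"

	"golang.org/x/mod/semver"
)

func main() {
	vs := []string{
		"v1.0.0", "v1.0.0-rc.1", "v1.0.0-beta.11", "v1.0.0-beta.2",
		"v1.0.0-beta", "v1.0.0-alpha.beta", "v1.0.0-alpha.1", "v1.0.0-alpha",
	}
	semver.Sort(vs)
	for _, v := range vs {
		fmt.Println(v)
	}
	// Prints the spec's ordering: v1.0.0-alpha, v1.0.0-alpha.1, v1.0.0-alpha.beta,
	// v1.0.0-beta, v1.0.0-beta.2, v1.0.0-beta.11, v1.0.0-rc.1, v1.0.0.
}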
func nextIdent(x string) (dx, rest string) {
i := 0
for i < len(x) && x[i] != '.' {
i++
}
return x[:i], x[i:]
}

View File

@ -3,29 +3,31 @@
// license that can be found in the LICENSE file.
// Package context defines the Context type, which carries deadlines,
// cancelation signals, and other request-scoped values across API boundaries
// cancellation signals, and other request-scoped values across API boundaries
// and between processes.
// As of Go 1.7 this package is available in the standard library under the
// name context. https://golang.org/pkg/context.
// name [context], and migrating to it can be done automatically with [go fix].
//
// Incoming requests to a server should create a Context, and outgoing calls to
// servers should accept a Context. The chain of function calls between must
// propagate the Context, optionally replacing it with a modified copy created
// using WithDeadline, WithTimeout, WithCancel, or WithValue.
// Incoming requests to a server should create a [Context], and outgoing
// calls to servers should accept a Context. The chain of function
// calls between them must propagate the Context, optionally replacing
// it with a derived Context created using [WithCancel], [WithDeadline],
// [WithTimeout], or [WithValue].
//
// Programs that use Contexts should follow these rules to keep interfaces
// consistent across packages and enable static analysis tools to check context
// propagation:
//
// Do not store Contexts inside a struct type; instead, pass a Context
// explicitly to each function that needs it. The Context should be the first
// explicitly to each function that needs it. This is discussed further in
// https://go.dev/blog/context-and-structs. The Context should be the first
// parameter, typically named ctx:
//
// func DoSomething(ctx context.Context, arg Arg) error {
// // ... use ctx ...
// }
//
// Do not pass a nil Context, even if a function permits it. Pass context.TODO
// Do not pass a nil [Context], even if a function permits it. Pass [context.TODO]
// if you are unsure about which Context to use.
//
// Use context Values only for request-scoped data that transits processes and
@ -34,9 +36,30 @@
// The same Context may be passed to functions running in different goroutines;
// Contexts are safe for simultaneous use by multiple goroutines.
//
// See http://blog.golang.org/context for example code for a server that uses
// See https://go.dev/blog/context for example code for a server that uses
// Contexts.
package context // import "golang.org/x/net/context"
//
// [go fix]: https://go.dev/cmd/go#hdr-Update_packages_to_use_new_APIs
package context
import (
"context" // standard library's context, as of Go 1.7
"time"
)
// A Context carries a deadline, a cancellation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context
// Canceled is the error returned by [Context.Err] when the context is canceled
// for some reason other than its deadline passing.
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by [Context.Err] when the context is canceled
// due to its deadline passing.
var DeadlineExceeded = context.DeadlineExceeded
// Background returns a non-nil, empty Context. It is never canceled, has no
// values, and has no deadline. It is typically used by the main function,
@ -49,8 +72,73 @@ func Background() Context {
// TODO returns a non-nil, empty Context. Code should use context.TODO when
// it's unclear which Context to use or it is not yet available (because the
// surrounding function has not yet been extended to accept a Context
// parameter). TODO is recognized by static analysis tools that determine
// whether Contexts are propagated correctly in a program.
// parameter).
func TODO() Context {
return todo
}
var (
background = context.Background()
todo = context.TODO()
)
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// A CancelFunc may be called by multiple goroutines simultaneously.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc
// WithCancel returns a derived context that points to the parent context
// but has a new Done channel. The returned context's Done channel is closed
// when the returned cancel function is called or when the parent context's
// Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
return context.WithCancel(parent)
}
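A minimal, self-contained sketch of the cancellation flow described above (the timing is arbitrary):

package main

import (
	"fmt"
	"time"

	"golang.org/x/net/context"
)

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	go func() {
		time.Sleep(10 * time.Millisecond)
		cancel() // closes ctx.Done() and releases resources tied to ctx
	}()
	<-ctx.Done()
	fmt.Println(ctx.Err()) // context canceled
}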
// WithDeadline returns a derived context that points to the parent context
// but has the deadline adjusted to be no later than d. If the parent's
// deadline is already earlier than d, WithDeadline(parent, d) is semantically
// equivalent to parent. The returned [Context.Done] channel is closed when
// the deadline expires, when the returned cancel function is called,
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete.
func WithDeadline(parent Context, d time.Time) (Context, CancelFunc) {
return context.WithDeadline(parent, d)
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this [Context] complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return context.WithTimeout(parent, timeout)
}
// WithValue returns a derived context that points to the parent Context.
// In the derived context, the value associated with key is val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
//
// The provided key must be comparable and should not be of type
// string or any other built-in type to avoid collisions between
// packages using context. Users of WithValue should define their own
// types for keys. To avoid allocating when assigning to an
// interface{}, context keys often have concrete type
// struct{}. Alternatively, exported context key variables' static
// type should be a pointer or interface.
func WithValue(parent Context, key, val interface{}) Context {
return context.WithValue(parent, key, val)
}
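A short sketch of the key-type advice above; ctxKey and the stored value are illustrative names, not part of this change:

package main

import (
	"fmt"

	"golang.org/x/net/context"
)

// ctxKey is unexported so it cannot collide with keys defined by other packages.
type ctxKey struct{}

func main() {
	ctx := context.WithValue(context.Background(), ctxKey{}, "request-42")
	if id, ok := ctx.Value(ctxKey{}).(string); ok {
		fmt.Println(id) // request-42
	}
}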

View File

@ -1,72 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.7
package context
import (
"context" // standard library's context, as of Go 1.7
"time"
)
var (
todo = context.TODO()
background = context.Background()
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = context.Canceled
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = context.DeadlineExceeded
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
ctx, f := context.WithCancel(parent)
return ctx, f
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
ctx, f := context.WithDeadline(parent, deadline)
return ctx, f
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return context.WithValue(parent, key, val)
}

View File

@ -1,20 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.9
package context
import "context" // standard library's context, as of Go 1.7
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context = context.Context
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc = context.CancelFunc

View File

@ -1,300 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.7
package context
import (
"errors"
"fmt"
"sync"
"time"
)
// An emptyCtx is never canceled, has no values, and has no deadline. It is not
// struct{}, since vars of this type must have distinct addresses.
type emptyCtx int
func (*emptyCtx) Deadline() (deadline time.Time, ok bool) {
return
}
func (*emptyCtx) Done() <-chan struct{} {
return nil
}
func (*emptyCtx) Err() error {
return nil
}
func (*emptyCtx) Value(key interface{}) interface{} {
return nil
}
func (e *emptyCtx) String() string {
switch e {
case background:
return "context.Background"
case todo:
return "context.TODO"
}
return "unknown empty Context"
}
var (
background = new(emptyCtx)
todo = new(emptyCtx)
)
// Canceled is the error returned by Context.Err when the context is canceled.
var Canceled = errors.New("context canceled")
// DeadlineExceeded is the error returned by Context.Err when the context's
// deadline passes.
var DeadlineExceeded = errors.New("context deadline exceeded")
// WithCancel returns a copy of parent with a new Done channel. The returned
// context's Done channel is closed when the returned cancel function is called
// or when the parent context's Done channel is closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithCancel(parent Context) (ctx Context, cancel CancelFunc) {
c := newCancelCtx(parent)
propagateCancel(parent, c)
return c, func() { c.cancel(true, Canceled) }
}
// newCancelCtx returns an initialized cancelCtx.
func newCancelCtx(parent Context) *cancelCtx {
return &cancelCtx{
Context: parent,
done: make(chan struct{}),
}
}
// propagateCancel arranges for child to be canceled when parent is.
func propagateCancel(parent Context, child canceler) {
if parent.Done() == nil {
return // parent is never canceled
}
if p, ok := parentCancelCtx(parent); ok {
p.mu.Lock()
if p.err != nil {
// parent has already been canceled
child.cancel(false, p.err)
} else {
if p.children == nil {
p.children = make(map[canceler]bool)
}
p.children[child] = true
}
p.mu.Unlock()
} else {
go func() {
select {
case <-parent.Done():
child.cancel(false, parent.Err())
case <-child.Done():
}
}()
}
}
// parentCancelCtx follows a chain of parent references until it finds a
// *cancelCtx. This function understands how each of the concrete types in this
// package represents its parent.
func parentCancelCtx(parent Context) (*cancelCtx, bool) {
for {
switch c := parent.(type) {
case *cancelCtx:
return c, true
case *timerCtx:
return c.cancelCtx, true
case *valueCtx:
parent = c.Context
default:
return nil, false
}
}
}
// removeChild removes a context from its parent.
func removeChild(parent Context, child canceler) {
p, ok := parentCancelCtx(parent)
if !ok {
return
}
p.mu.Lock()
if p.children != nil {
delete(p.children, child)
}
p.mu.Unlock()
}
// A canceler is a context type that can be canceled directly. The
// implementations are *cancelCtx and *timerCtx.
type canceler interface {
cancel(removeFromParent bool, err error)
Done() <-chan struct{}
}
// A cancelCtx can be canceled. When canceled, it also cancels any children
// that implement canceler.
type cancelCtx struct {
Context
done chan struct{} // closed by the first cancel call.
mu sync.Mutex
children map[canceler]bool // set to nil by the first cancel call
err error // set to non-nil by the first cancel call
}
func (c *cancelCtx) Done() <-chan struct{} {
return c.done
}
func (c *cancelCtx) Err() error {
c.mu.Lock()
defer c.mu.Unlock()
return c.err
}
func (c *cancelCtx) String() string {
return fmt.Sprintf("%v.WithCancel", c.Context)
}
// cancel closes c.done, cancels each of c's children, and, if
// removeFromParent is true, removes c from its parent's children.
func (c *cancelCtx) cancel(removeFromParent bool, err error) {
if err == nil {
panic("context: internal error: missing cancel error")
}
c.mu.Lock()
if c.err != nil {
c.mu.Unlock()
return // already canceled
}
c.err = err
close(c.done)
for child := range c.children {
// NOTE: acquiring the child's lock while holding parent's lock.
child.cancel(false, err)
}
c.children = nil
c.mu.Unlock()
if removeFromParent {
removeChild(c.Context, c)
}
}
// WithDeadline returns a copy of the parent context with the deadline adjusted
// to be no later than d. If the parent's deadline is already earlier than d,
// WithDeadline(parent, d) is semantically equivalent to parent. The returned
// context's Done channel is closed when the deadline expires, when the returned
// cancel function is called, or when the parent context's Done channel is
// closed, whichever happens first.
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete.
func WithDeadline(parent Context, deadline time.Time) (Context, CancelFunc) {
if cur, ok := parent.Deadline(); ok && cur.Before(deadline) {
// The current deadline is already sooner than the new one.
return WithCancel(parent)
}
c := &timerCtx{
cancelCtx: newCancelCtx(parent),
deadline: deadline,
}
propagateCancel(parent, c)
d := deadline.Sub(time.Now())
if d <= 0 {
c.cancel(true, DeadlineExceeded) // deadline has already passed
return c, func() { c.cancel(true, Canceled) }
}
c.mu.Lock()
defer c.mu.Unlock()
if c.err == nil {
c.timer = time.AfterFunc(d, func() {
c.cancel(true, DeadlineExceeded)
})
}
return c, func() { c.cancel(true, Canceled) }
}
// A timerCtx carries a timer and a deadline. It embeds a cancelCtx to
// implement Done and Err. It implements cancel by stopping its timer then
// delegating to cancelCtx.cancel.
type timerCtx struct {
*cancelCtx
timer *time.Timer // Under cancelCtx.mu.
deadline time.Time
}
func (c *timerCtx) Deadline() (deadline time.Time, ok bool) {
return c.deadline, true
}
func (c *timerCtx) String() string {
return fmt.Sprintf("%v.WithDeadline(%s [%s])", c.cancelCtx.Context, c.deadline, c.deadline.Sub(time.Now()))
}
func (c *timerCtx) cancel(removeFromParent bool, err error) {
c.cancelCtx.cancel(false, err)
if removeFromParent {
// Remove this timerCtx from its parent cancelCtx's children.
removeChild(c.cancelCtx.Context, c)
}
c.mu.Lock()
if c.timer != nil {
c.timer.Stop()
c.timer = nil
}
c.mu.Unlock()
}
// WithTimeout returns WithDeadline(parent, time.Now().Add(timeout)).
//
// Canceling this context releases resources associated with it, so code should
// call cancel as soon as the operations running in this Context complete:
//
// func slowOperationWithTimeout(ctx context.Context) (Result, error) {
// ctx, cancel := context.WithTimeout(ctx, 100*time.Millisecond)
// defer cancel() // releases resources if slowOperation completes before timeout elapses
// return slowOperation(ctx)
// }
func WithTimeout(parent Context, timeout time.Duration) (Context, CancelFunc) {
return WithDeadline(parent, time.Now().Add(timeout))
}
// WithValue returns a copy of parent in which the value associated with key is
// val.
//
// Use context Values only for request-scoped data that transits processes and
// APIs, not for passing optional parameters to functions.
func WithValue(parent Context, key interface{}, val interface{}) Context {
return &valueCtx{parent, key, val}
}
// A valueCtx carries a key-value pair. It implements Value for that key and
// delegates all other calls to the embedded Context.
type valueCtx struct {
Context
key, val interface{}
}
func (c *valueCtx) String() string {
return fmt.Sprintf("%v.WithValue(%#v, %#v)", c.Context, c.key, c.val)
}
func (c *valueCtx) Value(key interface{}) interface{} {
if c.key == key {
return c.val
}
return c.Context.Value(key)
}

View File

@ -1,109 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.9
package context
import "time"
// A Context carries a deadline, a cancelation signal, and other values across
// API boundaries.
//
// Context's methods may be called by multiple goroutines simultaneously.
type Context interface {
// Deadline returns the time when work done on behalf of this context
// should be canceled. Deadline returns ok==false when no deadline is
// set. Successive calls to Deadline return the same results.
Deadline() (deadline time.Time, ok bool)
// Done returns a channel that's closed when work done on behalf of this
// context should be canceled. Done may return nil if this context can
// never be canceled. Successive calls to Done return the same value.
//
// WithCancel arranges for Done to be closed when cancel is called;
// WithDeadline arranges for Done to be closed when the deadline
// expires; WithTimeout arranges for Done to be closed when the timeout
// elapses.
//
// Done is provided for use in select statements:
//
// // Stream generates values with DoSomething and sends them to out
// // until DoSomething returns an error or ctx.Done is closed.
// func Stream(ctx context.Context, out chan<- Value) error {
// for {
// v, err := DoSomething(ctx)
// if err != nil {
// return err
// }
// select {
// case <-ctx.Done():
// return ctx.Err()
// case out <- v:
// }
// }
// }
//
// See http://blog.golang.org/pipelines for more examples of how to use
// a Done channel for cancelation.
Done() <-chan struct{}
// Err returns a non-nil error value after Done is closed. Err returns
// Canceled if the context was canceled or DeadlineExceeded if the
// context's deadline passed. No other values for Err are defined.
// After Done is closed, successive calls to Err return the same value.
Err() error
// Value returns the value associated with this context for key, or nil
// if no value is associated with key. Successive calls to Value with
// the same key returns the same result.
//
// Use context values only for request-scoped data that transits
// processes and API boundaries, not for passing optional parameters to
// functions.
//
// A key identifies a specific value in a Context. Functions that wish
// to store values in Context typically allocate a key in a global
// variable then use that key as the argument to context.WithValue and
// Context.Value. A key can be any type that supports equality;
// packages should define keys as an unexported type to avoid
// collisions.
//
// Packages that define a Context key should provide type-safe accessors
// for the values stored using that key:
//
// // Package user defines a User type that's stored in Contexts.
// package user
//
// import "golang.org/x/net/context"
//
// // User is the type of value stored in the Contexts.
// type User struct {...}
//
// // key is an unexported type for keys defined in this package.
// // This prevents collisions with keys defined in other packages.
// type key int
//
// // userKey is the key for user.User values in Contexts. It is
// // unexported; clients use user.NewContext and user.FromContext
// // instead of using this key directly.
// var userKey key = 0
//
// // NewContext returns a new Context that carries value u.
// func NewContext(ctx context.Context, u *User) context.Context {
// return context.WithValue(ctx, userKey, u)
// }
//
// // FromContext returns the User value stored in ctx, if any.
// func FromContext(ctx context.Context) (*User, bool) {
// u, ok := ctx.Value(userKey).(*User)
// return u, ok
// }
Value(key interface{}) interface{}
}
// A CancelFunc tells an operation to abandon its work.
// A CancelFunc does not wait for the work to stop.
// After the first call, subsequent calls to a CancelFunc do nothing.
type CancelFunc func()

View File

@ -34,11 +34,19 @@ import (
)
var (
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
inTests bool
disableExtendedConnectProtocol bool
VerboseLogs bool
logFrameWrites bool
logFrameReads bool
inTests bool
// Enabling extended CONNECT by default causes browsers to attempt to use
// WebSockets-over-HTTP/2. This results in problems when the server's websocket
// package doesn't support extended CONNECT.
//
// Disable extended CONNECT by default for now.
//
// Issue #71128.
disableExtendedConnectProtocol = true
)
func init() {
@ -51,8 +59,8 @@ func init() {
logFrameWrites = true
logFrameReads = true
}
if strings.Contains(e, "http2xconnect=0") {
disableExtendedConnectProtocol = true
if strings.Contains(e, "http2xconnect=1") {
disableExtendedConnectProtocol = false
}
}
@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) {
s.v = save
}
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// - a non-empty string starting with '/'
// - the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
// incomparable is a zero-width, non-comparable type. Adding it to a struct
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).

View File

@ -50,6 +50,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/internal/httpcommon"
)
const (
@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
buildCommonHeaderMapsOnce()
cv, ok := commonCanonHeader[v]
cv, ok := httpcommon.CachedCanonicalHeader(v)
if ok {
return cv
}
@ -2233,25 +2233,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
sc.serveG.check()
rp := requestParam{
method: f.PseudoValue("method"),
scheme: f.PseudoValue("scheme"),
authority: f.PseudoValue("authority"),
path: f.PseudoValue("path"),
protocol: f.PseudoValue("protocol"),
rp := httpcommon.ServerRequestParam{
Method: f.PseudoValue("method"),
Scheme: f.PseudoValue("scheme"),
Authority: f.PseudoValue("authority"),
Path: f.PseudoValue("path"),
Protocol: f.PseudoValue("protocol"),
}
// extended connect is disabled, so we should not see :protocol
if disableExtendedConnectProtocol && rp.protocol != "" {
if disableExtendedConnectProtocol && rp.Protocol != "" {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
isConnect := rp.method == "CONNECT"
isConnect := rp.Method == "CONNECT"
if isConnect {
if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
} else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
} else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
//
// Malformed requests or responses that are detected
@ -2265,15 +2265,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
}
rp.header = make(http.Header)
header := make(http.Header)
rp.Header = header
for _, hf := range f.RegularFields() {
rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
header.Add(sc.canonicalHeader(hf.Name), hf.Value)
}
if rp.authority == "" {
rp.authority = rp.header.Get("Host")
if rp.Authority == "" {
rp.Authority = header.Get("Host")
}
if rp.protocol != "" {
rp.header.Set(":protocol", rp.protocol)
if rp.Protocol != "" {
header.Set(":protocol", rp.Protocol)
}
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
@ -2282,7 +2283,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
}
bodyOpen := !f.StreamEnded()
if bodyOpen {
if vv, ok := rp.header["Content-Length"]; ok {
if vv, ok := rp.Header["Content-Length"]; ok {
if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
req.ContentLength = int64(cl)
} else {
@ -2298,84 +2299,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return rw, req, nil
}
type requestParam struct {
method string
scheme, authority, path string
protocol string
header http.Header
}
func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
sc.serveG.check()
var tlsState *tls.ConnectionState // nil if not scheme https
if rp.scheme == "https" {
if rp.Scheme == "https" {
tlsState = sc.tlsState
}
needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
if needsContinue {
rp.header.Del("Expect")
}
// Merge Cookie headers into one "; "-delimited value.
if cookies := rp.header["Cookie"]; len(cookies) > 1 {
rp.header.Set("Cookie", strings.Join(cookies, "; "))
}
// Setup Trailers
var trailer http.Header
for _, v := range rp.header["Trailer"] {
for _, key := range strings.Split(v, ",") {
key = http.CanonicalHeaderKey(textproto.TrimString(key))
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
// Bogus. (copy of http1 rules)
// Ignore.
default:
if trailer == nil {
trailer = make(http.Header)
}
trailer[key] = nil
}
}
}
delete(rp.header, "Trailer")
var url_ *url.URL
var requestURI string
if rp.method == "CONNECT" && rp.protocol == "" {
url_ = &url.URL{Host: rp.authority}
requestURI = rp.authority // mimic HTTP/1 server behavior
} else {
var err error
url_, err = url.ParseRequestURI(rp.path)
if err != nil {
return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
}
requestURI = rp.path
res := httpcommon.NewServerRequest(rp)
if res.InvalidReason != "" {
return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
}
body := &requestBody{
conn: sc,
stream: st,
needsContinue: needsContinue,
needsContinue: res.NeedsContinue,
}
req := &http.Request{
Method: rp.method,
URL: url_,
req := (&http.Request{
Method: rp.Method,
URL: res.URL,
RemoteAddr: sc.remoteAddrStr,
Header: rp.header,
RequestURI: requestURI,
Header: rp.Header,
RequestURI: res.RequestURI,
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
TLS: tlsState,
Host: rp.authority,
Host: rp.Authority,
Body: body,
Trailer: trailer,
}
req = req.WithContext(st.ctx)
Trailer: res.Trailer,
}).WithContext(st.ctx)
rw := sc.newResponseWriter(st, req)
return rw, req, nil
}
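For orientation, a hedged sketch of the conversion the server now delegates to httpcommon.NewServerRequest (httpcommon is internal to x/net, so this only builds inside that module; the field values are illustrative):

package main

import (
	"fmt"
	"net/http"

	"golang.org/x/net/internal/httpcommon"
)

func main() {
	rp := httpcommon.ServerRequestParam{
		Method:    "GET",
		Scheme:    "https",
		Authority: "example.com",
		Path:      "/index.html",
		Header:    make(http.Header),
	}
	res := httpcommon.NewServerRequest(rp)
	if res.InvalidReason != "" {
		fmt.Println("reject stream:", res.InvalidReason) // maps to a protocol error above
		return
	}
	fmt.Println(res.RequestURI, res.NeedsContinue) // /index.html false
}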
@ -3270,12 +3225,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
// we start in "half closed (remote)" for simplicity.
// See further comments at the definition of stateHalfClosedRemote.
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
method: msg.method,
scheme: msg.url.Scheme,
authority: msg.url.Host,
path: msg.url.RequestURI(),
header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
Method: msg.method,
Scheme: msg.url.Scheme,
Authority: msg.url.Host,
Path: msg.url.RequestURI(),
Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
})
if err != nil {
// Should not happen, since we've already validated msg.url.

View File

@ -25,7 +25,6 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
"sort"
"strconv"
"strings"
"sync"
@ -35,6 +34,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
"golang.org/x/net/internal/httpcommon"
)
const (
@ -1275,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() {
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var errRequestCanceled = errors.New("net/http: request canceled")
func commaSeparatedTrailers(req *http.Request) (string, error) {
keys := make([]string, 0, len(req.Trailer))
for k := range req.Trailer {
k = canonicalHeader(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
return "", fmt.Errorf("invalid Trailer key %q", k)
}
keys = append(keys, k)
}
if len(keys) > 0 {
sort.Strings(keys)
return strings.Join(keys, ","), nil
}
return "", nil
}
func (cc *ClientConn) responseHeaderTimeout() time.Duration {
if cc.t.t1 != nil {
return cc.t.t1.ResponseHeaderTimeout
@ -1303,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
return 0
}
// checkConnHeaders checks whether req has any invalid connection-level headers.
// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
// Certain headers are special-cased as okay but not transmitted later.
func checkConnHeaders(req *http.Request) error {
if v := req.Header.Get("Upgrade"); v != "" {
return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
}
if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
}
if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
return fmt.Errorf("http2: invalid Connection request header: %q", vv)
}
return nil
}
// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
@ -1364,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
donec: make(chan struct{}),
}
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
if !cc.t.disableCompression() &&
req.Header.Get("Accept-Encoding") == "" &&
req.Header.Get("Range") == "" &&
!cs.isHead {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// http://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
cs.requestedGzip = true
}
cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression())
go cs.doRequest(req, streamf)
@ -1496,10 +1445,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
cc := cs.cc
ctx := cs.ctx
if err := checkConnHeaders(req); err != nil {
return err
}
// wait for setting frames to be received, a server can change this value later,
// but we just wait for the first settings frame
var isExtendedConnect bool
@ -1663,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
// sent by writeRequestBody below, along with any Trailers,
// again in form HEADERS{1}, CONTINUATION{0,})
trailers, err := commaSeparatedTrailers(req)
cc.hbuf.Reset()
res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) {
cc.writeHeader(name, value)
})
if err != nil {
return err
}
hasTrailers := trailers != ""
contentLen := actualContentLength(req)
hasBody := contentLen != 0
hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
if err != nil {
return err
return fmt.Errorf("http2: %w", err)
}
hdrs := cc.hbuf.Bytes()
// Write the request.
endStream := !hasBody && !hasTrailers
endStream := !res.HasBody && !res.HasTrailers
cs.sentHeaders = true
err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
traceWroteHeaders(cs.trace)
return err
}
func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) {
return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{
Request: httpcommon.Request{
Header: req.Header,
Trailer: req.Trailer,
URL: req.URL,
Host: req.Host,
Method: req.Method,
ActualContentLength: actualContentLength(req),
},
AddGzipHeader: addGzipHeader,
PeerMaxHeaderListSize: peerMaxHeaderListSize,
DefaultUserAgent: defaultUserAgent,
}, headerf)
}
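To make the new call shape concrete: EncodeHeaders validates the request and streams each (name, value) pair through the callback. A hedged, illustrative sketch that collects the pairs instead of HPACK-encoding them (httpcommon is internal to x/net, so this only compiles inside that module):

package main

import (
	"context"
	"fmt"
	"net/url"

	"golang.org/x/net/internal/httpcommon"
)

func main() {
	u, _ := url.Parse("https://example.com/search?q=go")
	var pairs [][2]string
	res, err := httpcommon.EncodeHeaders(context.Background(), httpcommon.EncodeHeadersParam{
		Request: httpcommon.Request{
			URL:                 u,
			Method:              "GET",
			Header:              map[string][]string{"Accept": {"text/html"}},
			ActualContentLength: 0,
		},
		AddGzipHeader:    true,
		DefaultUserAgent: "example-agent/1.0",
	}, func(name, value string) {
		pairs = append(pairs, [2]string{name, value}) // collect instead of HPACK-encoding
	})
	fmt.Println(err, res.HasBody, res.HasTrailers)
	fmt.Println(pairs) // pseudo-headers first, then accept, accept-encoding, user-agent
}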
// cleanupWriteRequest performs post-request tasks.
//
// If err (the result of writeRequest) is non-nil and the stream is not closed,
@ -2070,218 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
func validateHeaders(hdrs http.Header) string {
for k, vv := range hdrs {
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
return fmt.Sprintf("name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
// Don't include the value in the error,
// because it may be sensitive.
return fmt.Sprintf("value for header %q", k)
}
}
}
return ""
}
var errNilRequestURL = errors.New("http2: Request.URI is nil")
func isNormalConnect(req *http.Request) bool {
return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
}
// requires cc.wmu be held.
func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
cc.hbuf.Reset()
if req.URL == nil {
return nil, errNilRequestURL
}
host := req.Host
if host == "" {
host = req.URL.Host
}
host, err := httpguts.PunycodeHostPort(host)
if err != nil {
return nil, err
}
if !httpguts.ValidHostHeader(host) {
return nil, errors.New("http2: invalid Host header")
}
var path string
if !isNormalConnect(req) {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return nil, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers+trailers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
if err := validateHeaders(req.Header); err != "" {
return nil, fmt.Errorf("invalid HTTP header %s", err)
}
if err := validateHeaders(req.Trailer); err != "" {
return nil, fmt.Errorf("invalid HTTP trailer %s", err)
}
enumerateHeaders := func(f func(name, value string)) {
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
if m == "" {
m = http.MethodGet
}
f(":method", m)
if !isNormalConnect(req) {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
if trailers != "" {
f("trailer", trailers)
}
var didUA bool
for k, vv := range req.Header {
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
// Host is :authority, already sent.
// Content-Length is automatic, set below.
continue
} else if asciiEqualFold(k, "connection") ||
asciiEqualFold(k, "proxy-connection") ||
asciiEqualFold(k, "transfer-encoding") ||
asciiEqualFold(k, "upgrade") ||
asciiEqualFold(k, "keep-alive") {
// Per 8.1.2.2 Connection-Specific Header
// Fields, don't send connection-specific
// fields. We have already checked if any
// are error-worthy so just ignore the rest.
continue
} else if asciiEqualFold(k, "user-agent") {
// Match Go's http1 behavior: at most one
// User-Agent. If set to nil or empty string,
// then omit it. Otherwise if not mentioned,
// include the default (below).
didUA = true
if len(vv) < 1 {
continue
}
vv = vv[:1]
if vv[0] == "" {
continue
}
} else if asciiEqualFold(k, "cookie") {
// Per 8.1.2.5 To allow for better compression efficiency, the
// Cookie header field MAY be split into separate header fields,
// each with one or more cookie-pairs.
for _, v := range vv {
for {
p := strings.IndexByte(v, ';')
if p < 0 {
break
}
f("cookie", v[:p])
p++
// strip space after semicolon if any.
for p+1 <= len(v) && v[p] == ' ' {
p++
}
v = v[p:]
}
if len(v) > 0 {
f("cookie", v)
}
}
continue
}
for _, v := range vv {
f(k, v)
}
}
if shouldSendReqContentLength(req.Method, contentLength) {
f("content-length", strconv.FormatInt(contentLength, 10))
}
if addGzipHeader {
f("accept-encoding", "gzip")
}
if !didUA {
f("user-agent", defaultUserAgent)
}
}
// Do a first pass over the headers counting bytes to ensure
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
// separate pass before encoding the headers to prevent
// modifying the hpack state.
hlSize := uint64(0)
enumerateHeaders(func(name, value string) {
hf := hpack.HeaderField{Name: name, Value: value}
hlSize += uint64(hf.Size())
})
if hlSize > cc.peerMaxHeaderListSize {
return nil, errRequestHeaderListSize
}
trace := httptrace.ContextClientTrace(req.Context())
traceHeaders := traceHasWroteHeaderField(trace)
// Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) {
name, ascii := lowerHeader(name)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
return
}
cc.writeHeader(name, value)
if traceHeaders {
traceWroteHeaderField(trace, name, value)
}
})
return cc.hbuf.Bytes(), nil
}
// shouldSendReqContentLength reports whether the http2.Transport should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
if contentLength > 0 {
return true
}
if contentLength < 0 {
return false
}
// For zero bodies, whether we send a content-length depends on the method.
// It also kinda doesn't matter for http2 either way, with END_STREAM.
switch method {
case "POST", "PUT", "PATCH":
return true
default:
return false
}
}
// requires cc.wmu be held.
func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
cc.hbuf.Reset()
@ -2298,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
}
for k, vv := range trailer {
lowKey, ascii := lowerHeader(k)
lowKey, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
@ -2464,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() {
}
cc.cond.Broadcast()
cc.mu.Unlock()
if !cc.seenSettings {
// If we have a pending request that wants extended CONNECT,
// let it continue and fail with the connection error.
cc.extendedConnectAllowed = true
close(cc.seenSettingsChan)
}
}
// countReadFrameError calls Transport.CountError with a string
@ -2556,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
if !cc.seenSettings {
close(cc.seenSettingsChan)
}
return err
}
}
@ -2653,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
Status: status + " " + http.StatusText(statusCode),
}
for _, hf := range regularFields {
key := canonicalHeader(hf.Name)
key := httpcommon.CanonicalHeader(hf.Name)
if key == "Trailer" {
t := res.Trailer
if t == nil {
@ -2661,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
res.Trailer = t
}
foreachHeaderElement(hf.Value, func(v string) {
t[canonicalHeader(v)] = nil
t[httpcommon.CanonicalHeader(v)] = nil
})
} else {
vv := header[key]
@ -2785,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
trailer := make(http.Header)
for _, hf := range f.RegularFields() {
key := canonicalHeader(hf.Name)
key := httpcommon.CanonicalHeader(hf.Name)
trailer[key] = append(trailer[key], hf.Value)
}
cs.trailer = trailer
@ -3331,7 +3081,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool,
var (
errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize
)
func (cc *ClientConn) logf(format string, args ...interface{}) {
@ -3515,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
}
}
func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
return trace != nil && trace.WroteHeaderField != nil
}
func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(k, []string{v})
}
}
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse

View File

@ -13,6 +13,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/internal/httpcommon"
)
// writeFramer is implemented by any type that is used to write frames.
@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
}
for _, k := range keys {
vv := h[k]
k, ascii := lowerHeader(k)
k, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).

53
vendor/golang.org/x/net/internal/httpcommon/ascii.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httpcommon
import "strings"
// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
// contains helper functions which may use Unicode-aware functions which would
// otherwise be unsafe and could introduce vulnerabilities if used improperly.
// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
// are equal, ASCII-case-insensitively.
func asciiEqualFold(s, t string) bool {
if len(s) != len(t) {
return false
}
for i := 0; i < len(s); i++ {
if lower(s[i]) != lower(t[i]) {
return false
}
}
return true
}
// lower returns the ASCII lowercase version of b.
func lower(b byte) byte {
if 'A' <= b && b <= 'Z' {
return b + ('a' - 'A')
}
return b
}
// isASCIIPrint returns whether s is ASCII and printable according to
// https://tools.ietf.org/html/rfc20#section-4.2.
func isASCIIPrint(s string) bool {
for i := 0; i < len(s); i++ {
if s[i] < ' ' || s[i] > '~' {
return false
}
}
return true
}
// asciiToLower returns the lowercase version of s if s is ASCII and printable,
// and whether or not it was.
func asciiToLower(s string) (lower string, ok bool) {
if !isASCIIPrint(s) {
return "", false
}
return strings.ToLower(s), true
}
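These helpers are unexported, so any example has to live inside the package; a small illustrative test sketch (not part of the upstream test suite):

package httpcommon

import "testing"

func TestASCIIHelpersSketch(t *testing.T) {
	if !asciiEqualFold("Keep-Alive", "keep-alive") {
		t.Error("expected ASCII case-insensitive match")
	}
	if _, ok := asciiToLower("Café"); ok {
		t.Error("non-ASCII input must not be lowered")
	}
	if s, ok := asciiToLower("Upgrade"); !ok || s != "upgrade" {
		t.Errorf("asciiToLower(Upgrade) = %q, %v", s, ok)
	}
}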

View File

@ -1,11 +1,11 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package http2
package httpcommon
import (
"net/http"
"net/textproto"
"sync"
)
@ -82,13 +82,15 @@ func buildCommonHeaderMaps() {
commonLowerHeader = make(map[string]string, len(common))
commonCanonHeader = make(map[string]string, len(common))
for _, v := range common {
chk := http.CanonicalHeaderKey(v)
chk := textproto.CanonicalMIMEHeaderKey(v)
commonLowerHeader[chk] = v
commonCanonHeader[v] = chk
}
}
func lowerHeader(v string) (lower string, ascii bool) {
// LowerHeader returns the lowercase form of a header name,
// used on the wire for HTTP/2 and HTTP/3 requests.
func LowerHeader(v string) (lower string, ascii bool) {
buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s, true
@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) {
return asciiToLower(v)
}
func canonicalHeader(v string) string {
// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
func CanonicalHeader(v string) string {
buildCommonHeaderMapsOnce()
if s, ok := commonCanonHeader[v]; ok {
return s
}
return http.CanonicalHeaderKey(v)
return textproto.CanonicalMIMEHeaderKey(v)
}
// CachedCanonicalHeader returns the canonical form of a well-known header name.
func CachedCanonicalHeader(v string) (string, bool) {
buildCommonHeaderMapsOnce()
s, ok := commonCanonHeader[v]
return s, ok
}
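A brief sketch of the three exported lookups (httpcommon is internal to x/net; the header names here are just examples):

package main

import (
	"fmt"

	"golang.org/x/net/internal/httpcommon"
)

func main() {
	lower, ascii := httpcommon.LowerHeader("Content-Type")
	fmt.Println(lower, ascii) // content-type true

	fmt.Println(httpcommon.CanonicalHeader("x-request-id")) // X-Request-Id

	if s, ok := httpcommon.CachedCanonicalHeader("user-agent"); ok {
		fmt.Println(s) // User-Agent
	}
}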

467
vendor/golang.org/x/net/internal/httpcommon/request.go generated vendored Normal file
View File

@ -0,0 +1,467 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package httpcommon
import (
"context"
"errors"
"fmt"
"net/http/httptrace"
"net/textproto"
"net/url"
"sort"
"strconv"
"strings"
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
)
var (
ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
)
// Request is a subset of http.Request.
// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http
// without creating a dependency cycle.
type Request struct {
URL *url.URL
Method string
Host string
Header map[string][]string
Trailer map[string][]string
ActualContentLength int64 // 0 means 0, -1 means unknown
}
// EncodeHeadersParam contains the parameters to EncodeHeaders.
type EncodeHeadersParam struct {
Request Request
// AddGzipHeader indicates that an "accept-encoding: gzip" header should be
// added to the request.
AddGzipHeader bool
// PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
PeerMaxHeaderListSize uint64
// DefaultUserAgent is the User-Agent header to send when the request
// neither contains a User-Agent nor disables it.
DefaultUserAgent string
}
// EncodeHeadersResult is the result of EncodeHeaders.
type EncodeHeadersResult struct {
HasBody bool
HasTrailers bool
}
// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
// It validates a request and calls headerf with each pseudo-header and header
// for the request.
// The headerf function is called with the validated, canonicalized header name.
func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
req := param.Request
// Check for invalid connection-level headers.
if err := checkConnHeaders(req.Header); err != nil {
return res, err
}
if req.URL == nil {
return res, errors.New("Request.URL is nil")
}
host := req.Host
if host == "" {
host = req.URL.Host
}
host, err := httpguts.PunycodeHostPort(host)
if err != nil {
return res, err
}
if !httpguts.ValidHostHeader(host) {
return res, errors.New("invalid Host header")
}
// isNormalConnect is true if this is a non-extended CONNECT request.
isNormalConnect := false
var protocol string
if vv := req.Header[":protocol"]; len(vv) > 0 {
protocol = vv[0]
}
if req.Method == "CONNECT" && protocol == "" {
isNormalConnect = true
} else if protocol != "" && req.Method != "CONNECT" {
return res, errors.New("invalid :protocol header in non-CONNECT request")
}
// Validate the path, except for non-extended CONNECT requests which have no path.
var path string
if !isNormalConnect {
path = req.URL.RequestURI()
if !validPseudoPath(path) {
orig := path
path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
if !validPseudoPath(path) {
if req.URL.Opaque != "" {
return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
} else {
return res, fmt.Errorf("invalid request :path %q", orig)
}
}
}
}
// Check for any invalid headers+trailers and return an error before we
// potentially pollute our hpack state. (We want to be able to
// continue to reuse the hpack encoder for future requests)
if err := validateHeaders(req.Header); err != "" {
return res, fmt.Errorf("invalid HTTP header %s", err)
}
if err := validateHeaders(req.Trailer); err != "" {
return res, fmt.Errorf("invalid HTTP trailer %s", err)
}
trailers, err := commaSeparatedTrailers(req.Trailer)
if err != nil {
return res, err
}
enumerateHeaders := func(f func(name, value string)) {
// 8.1.2.3 Request Pseudo-Header Fields
// The :path pseudo-header field includes the path and query parts of the
// target URI (the path-absolute production and optionally a '?' character
// followed by the query production, see Sections 3.3 and 3.4 of
// [RFC3986]).
f(":authority", host)
m := req.Method
if m == "" {
m = "GET"
}
f(":method", m)
if !isNormalConnect {
f(":path", path)
f(":scheme", req.URL.Scheme)
}
if protocol != "" {
f(":protocol", protocol)
}
if trailers != "" {
f("trailer", trailers)
}
var didUA bool
for k, vv := range req.Header {
if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
// Host is :authority, already sent.
// Content-Length is automatic, set below.
continue
} else if asciiEqualFold(k, "connection") ||
asciiEqualFold(k, "proxy-connection") ||
asciiEqualFold(k, "transfer-encoding") ||
asciiEqualFold(k, "upgrade") ||
asciiEqualFold(k, "keep-alive") {
// Per 8.1.2.2 Connection-Specific Header
// Fields, don't send connection-specific
// fields. We have already checked if any
// are error-worthy so just ignore the rest.
continue
} else if asciiEqualFold(k, "user-agent") {
// Match Go's http1 behavior: at most one
// User-Agent. If set to nil or empty string,
// then omit it. Otherwise if not mentioned,
// include the default (below).
didUA = true
if len(vv) < 1 {
continue
}
vv = vv[:1]
if vv[0] == "" {
continue
}
} else if asciiEqualFold(k, "cookie") {
// Per 8.1.2.5 To allow for better compression efficiency, the
// Cookie header field MAY be split into separate header fields,
// each with one or more cookie-pairs.
for _, v := range vv {
for {
p := strings.IndexByte(v, ';')
if p < 0 {
break
}
f("cookie", v[:p])
p++
// strip space after semicolon if any.
for p+1 <= len(v) && v[p] == ' ' {
p++
}
v = v[p:]
}
if len(v) > 0 {
f("cookie", v)
}
}
continue
} else if k == ":protocol" {
// :protocol pseudo-header was already sent above.
continue
}
for _, v := range vv {
f(k, v)
}
}
if shouldSendReqContentLength(req.Method, req.ActualContentLength) {
f("content-length", strconv.FormatInt(req.ActualContentLength, 10))
}
if param.AddGzipHeader {
f("accept-encoding", "gzip")
}
if !didUA {
f("user-agent", param.DefaultUserAgent)
}
}
// Do a first pass over the headers counting bytes to ensure
// we don't exceed cc.peerMaxHeaderListSize. This is done as a
// separate pass before encoding the headers to prevent
// modifying the hpack state.
if param.PeerMaxHeaderListSize > 0 {
hlSize := uint64(0)
enumerateHeaders(func(name, value string) {
hf := hpack.HeaderField{Name: name, Value: value}
hlSize += uint64(hf.Size())
})
if hlSize > param.PeerMaxHeaderListSize {
return res, ErrRequestHeaderListSize
}
}
trace := httptrace.ContextClientTrace(ctx)
// Header list size is ok. Write the headers.
enumerateHeaders(func(name, value string) {
name, ascii := LowerHeader(name)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
return
}
headerf(name, value)
if trace != nil && trace.WroteHeaderField != nil {
trace.WroteHeaderField(name, []string{value})
}
})
res.HasBody = req.ActualContentLength != 0
res.HasTrailers = trailers != ""
return res, nil
}
// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
// for a request.
func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool {
// TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
if !disableCompression &&
len(header["Accept-Encoding"]) == 0 &&
len(header["Range"]) == 0 &&
method != "HEAD" {
// Request gzip only, not deflate. Deflate is ambiguous and
// not as universally supported anyway.
// See: https://zlib.net/zlib_faq.html#faq39
//
// Note that we don't request this for HEAD requests,
// due to a bug in nginx:
// http://trac.nginx.org/nginx/ticket/358
// https://golang.org/issue/5522
//
// We don't request gzip if the request is for a range, since
// auto-decoding a portion of a gzipped document will just fail
// anyway. See https://golang.org/issue/8923
return true
}
return false
}
// checkConnHeaders checks whether req has any invalid connection-level headers.
//
// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
//
// Certain headers are special-cased as okay but not transmitted later.
// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
func checkConnHeaders(h map[string][]string) error {
if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("invalid Upgrade request header: %q", vv)
}
if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
}
if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
return fmt.Errorf("invalid Connection request header: %q", vv)
}
return nil
}
func commaSeparatedTrailers(trailer map[string][]string) (string, error) {
keys := make([]string, 0, len(trailer))
for k := range trailer {
k = CanonicalHeader(k)
switch k {
case "Transfer-Encoding", "Trailer", "Content-Length":
return "", fmt.Errorf("invalid Trailer key %q", k)
}
keys = append(keys, k)
}
if len(keys) > 0 {
sort.Strings(keys)
return strings.Join(keys, ","), nil
}
return "", nil
}
// validPseudoPath reports whether v is a valid :path pseudo-header
// value. It must be either:
//
// - a non-empty string starting with '/'
// - the string '*', for OPTIONS requests.
//
// For now this is only used as a quick check for deciding when to clean
// up Opaque URLs before sending requests from the Transport.
// See golang.org/issue/16847
//
// We used to enforce that the path also didn't start with "//", but
// Google's GFE accepts such paths and Chrome sends them, so ignore
// that part of the spec. See golang.org/issue/19103.
func validPseudoPath(v string) bool {
return (len(v) > 0 && v[0] == '/') || v == "*"
}
func validateHeaders(hdrs map[string][]string) string {
for k, vv := range hdrs {
if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
return fmt.Sprintf("name %q", k)
}
for _, v := range vv {
if !httpguts.ValidHeaderFieldValue(v) {
// Don't include the value in the error,
// because it may be sensitive.
return fmt.Sprintf("value for header %q", k)
}
}
}
return ""
}
// shouldSendReqContentLength reports whether we should send
// a "content-length" request header. This logic is basically a copy of the net/http
// transferWriter.shouldSendContentLength.
// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
// -1 means unknown.
func shouldSendReqContentLength(method string, contentLength int64) bool {
if contentLength > 0 {
return true
}
if contentLength < 0 {
return false
}
// For zero bodies, whether we send a content-length depends on the method.
// It also kinda doesn't matter for http2 either way, with END_STREAM.
switch method {
case "POST", "PUT", "PATCH":
return true
default:
return false
}
}
// ServerRequestParam contains the parameters to NewServerRequest.
type ServerRequestParam struct {
Method string
Scheme, Authority, Path string
Protocol string
Header map[string][]string
}
// ServerRequestResult is the result of NewServerRequest.
type ServerRequestResult struct {
// Various http.Request fields.
URL *url.URL
RequestURI string
Trailer map[string][]string
NeedsContinue bool // client provided an "Expect: 100-continue" header
// If the request should be rejected, this is a short string suitable for passing
// to the http2 package's CountError function.
// It might be a bit odd to return errors this way rather than returning an error,
// but this ensures we don't forget to include a CountError reason.
InvalidReason string
}
func NewServerRequest(rp ServerRequestParam) ServerRequestResult {
needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue")
if needsContinue {
delete(rp.Header, "Expect")
}
// Merge Cookie headers into one "; "-delimited value.
if cookies := rp.Header["Cookie"]; len(cookies) > 1 {
rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")}
}
// Set up trailers.
var trailer map[string][]string
for _, v := range rp.Header["Trailer"] {
for _, key := range strings.Split(v, ",") {
key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key))
switch key {
case "Transfer-Encoding", "Trailer", "Content-Length":
// Bogus. (copy of http1 rules)
// Ignore.
default:
if trailer == nil {
trailer = make(map[string][]string)
}
trailer[key] = nil
}
}
}
delete(rp.Header, "Trailer")
// "':authority' MUST NOT include the deprecated userinfo subcomponent
// for "http" or "https" schemed URIs."
// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8
if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") {
return ServerRequestResult{
InvalidReason: "userinfo_in_authority",
}
}
var url_ *url.URL
var requestURI string
if rp.Method == "CONNECT" && rp.Protocol == "" {
url_ = &url.URL{Host: rp.Authority}
requestURI = rp.Authority // mimic HTTP/1 server behavior
} else {
var err error
url_, err = url.ParseRequestURI(rp.Path)
if err != nil {
return ServerRequestResult{
InvalidReason: "bad_path",
}
}
requestURI = rp.Path
}
return ServerRequestResult{
URL: url_,
NeedsContinue: needsContinue,
RequestURI: requestURI,
Trailer: trailer,
}
}
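A hedged sketch of how a client transport might drive EncodeHeaders into an HPACK encoder; the wrapper function, buffer wiring, and User-Agent value are illustrative and not part of this change (it also assumes imports of bytes, context, golang.org/x/net/http2/hpack, and this internal package):

func encodeRequestHeaders(ctx context.Context, req httpcommon.Request) ([]byte, httpcommon.EncodeHeadersResult, error) {
	var buf bytes.Buffer
	enc := hpack.NewEncoder(&buf)
	res, err := httpcommon.EncodeHeaders(ctx, httpcommon.EncodeHeadersParam{
		Request:          req,
		AddGzipHeader:    true,
		DefaultUserAgent: "Go-http-client/2.0",
	}, func(name, value string) {
		enc.WriteField(hpack.HeaderField{Name: name, Value: value})
	})
	if err != nil {
		return nil, res, err
	}
	// res.HasBody / res.HasTrailers tell the caller whether to open a body stream
	// and whether a trailing HEADERS frame will follow.
	return buf.Bytes(), res, nil
}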

View File

@ -7,6 +7,7 @@ package proxy
import (
"context"
"net"
"net/netip"
"strings"
)
@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.
}
func (p *PerHost) dialerForRequest(host string) Dialer {
if ip := net.ParseIP(host); ip != nil {
if nip, err := netip.ParseAddr(host); err == nil {
ip := net.IP(nip.AsSlice())
for _, net := range p.bypassNetworks {
if net.Contains(ip) {
return p.bypass
@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) {
}
continue
}
if ip := net.ParseIP(host); ip != nil {
p.AddIP(ip)
if nip, err := netip.ParseAddr(host); err == nil {
p.AddIP(net.IP(nip.AsSlice()))
continue
}
if strings.HasPrefix(host, "*.") {
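The switch from net.ParseIP to netip.ParseAddr keeps the bypass matching behaviour for ordinary address literals; a small illustration of the conversion used above (the address is arbitrary):

nip, err := netip.ParseAddr("192.0.2.1")
if err == nil {
	ip := net.IP(nip.AsSlice()) // 4-byte form; (*net.IPNet).Contains accepts 4- and 16-byte encodings alike
	_ = ip
}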

View File

@ -46,7 +46,7 @@ func (g *Group) done() {
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
ctx, cancel := withCancelCause(ctx)
ctx, cancel := context.WithCancelCause(ctx)
return &Group{cancel: cancel}, ctx
}
@ -118,6 +118,7 @@ func (g *Group) TryGo(f func() error) bool {
// SetLimit limits the number of active goroutines in this group to at most n.
// A negative value indicates no limit.
// A limit of zero will prevent any new goroutines from being added.
//
// Any subsequent call to the Go method will block until it can add an active
// goroutine without exceeding the configured limit.
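With WithContext now built directly on context.WithCancelCause (the go 1.24 toolchain guarantees Go 1.20+ semantics), the first error also becomes the derived context's cancellation cause. A small runnable example, assuming only the errgroup API shown here:

package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	g, ctx := errgroup.WithContext(context.Background())
	g.Go(func() error { return errors.New("boom") })
	_ = g.Wait()
	fmt.Println(context.Cause(ctx)) // boom: the first error is recorded as the cause
}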

View File

@ -1,13 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.20
package errgroup
import "context"
func withCancelCause(parent context.Context) (context.Context, func(error)) {
return context.WithCancelCause(parent)
}

View File

@ -1,14 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.20
package errgroup
import "context"
func withCancelCause(parent context.Context) (context.Context, func(error)) {
ctx, cancel := context.WithCancel(parent)
return ctx, func(error) { cancel() }
}

3
vendor/golang.org/x/sys/cpu/cpu.go generated vendored
View File

@ -72,6 +72,9 @@ var X86 struct {
HasSSSE3 bool // Supplemental streaming SIMD extension 3
HasSSE41 bool // Streaming SIMD extension 4 and 4.1
HasSSE42 bool // Streaming SIMD extension 4 and 4.2
HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add
HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions
HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions
_ CacheLinePad
}

View File

@ -53,6 +53,9 @@ func initOptions() {
{Name: "sse41", Feature: &X86.HasSSE41},
{Name: "sse42", Feature: &X86.HasSSE42},
{Name: "ssse3", Feature: &X86.HasSSSE3},
{Name: "avxifma", Feature: &X86.HasAVXIFMA},
{Name: "avxvnni", Feature: &X86.HasAVXVNNI},
{Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8},
// These capabilities should always be enabled on amd64:
{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
@ -106,7 +109,7 @@ func archInit() {
return
}
_, ebx7, ecx7, edx7 := cpuid(7, 0)
eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
X86.HasBMI1 = isSet(3, ebx7)
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
X86.HasBMI2 = isSet(8, ebx7)
@ -134,14 +137,24 @@ func archInit() {
X86.HasAVX512VAES = isSet(9, ecx7)
X86.HasAVX512VBMI2 = isSet(6, ecx7)
X86.HasAVX512BITALG = isSet(12, ecx7)
eax71, _, _, _ := cpuid(7, 1)
X86.HasAVX512BF16 = isSet(5, eax71)
}
X86.HasAMXTile = isSet(24, edx7)
X86.HasAMXInt8 = isSet(25, edx7)
X86.HasAMXBF16 = isSet(22, edx7)
// These features depend on the second level of extended features.
if eax7 >= 1 {
eax71, _, _, edx71 := cpuid(7, 1)
if X86.HasAVX512 {
X86.HasAVX512BF16 = isSet(5, eax71)
}
if X86.HasAVX {
X86.HasAVXIFMA = isSet(23, eax71)
X86.HasAVXVNNI = isSet(4, eax71)
X86.HasAVXVNNIInt8 = isSet(4, edx71)
}
}
}
func isSet(bitpos uint, value uint32) bool {
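A short runnable check of the newly reported feature bits (they are only meaningful on 386/amd64 and stay false elsewhere):

package main

import (
	"fmt"

	"golang.org/x/sys/cpu"
)

func main() {
	fmt.Println("AVX-VNNI:     ", cpu.X86.HasAVXVNNI)
	fmt.Println("AVX-IFMA:     ", cpu.X86.HasAVXIFMA)
	fmt.Println("AVX-VNNI-INT8:", cpu.X86.HasAVXVNNIInt8)
}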

36
vendor/golang.org/x/sys/unix/auxv.go generated vendored Normal file
View File

@ -0,0 +1,36 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
package unix
import (
"syscall"
"unsafe"
)
//go:linkname runtime_getAuxv runtime.getAuxv
func runtime_getAuxv() []uintptr
// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs.
// The returned slice is always a fresh copy, owned by the caller.
// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed,
// which happens in some locked-down environments and build modes.
func Auxv() ([][2]uintptr, error) {
vec := runtime_getAuxv()
vecLen := len(vec)
if vecLen == 0 {
return nil, syscall.ENOENT
}
if vecLen%2 != 0 {
return nil, syscall.EINVAL
}
result := make([]uintptr, vecLen)
copy(result, vec)
return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil
}
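A hedged usage sketch for the new unix.Auxv helper (Go 1.21+ on ELF platforms only; interpreting the keys requires the ELF AT_* constants, which are not part of this API):

package main

import (
	"fmt"
	"log"

	"golang.org/x/sys/unix"
)

func main() {
	pairs, err := unix.Auxv()
	if err != nil {
		log.Fatal(err) // e.g. the auxiliary vector is unavailable in this build mode
	}
	for _, kv := range pairs {
		fmt.Printf("AT key %d = %#x\n", kv[0], kv[1])
	}
}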

13
vendor/golang.org/x/sys/unix/auxv_unsupported.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
package unix
import "syscall"
func Auxv() ([][2]uintptr, error) {
return nil, syscall.ENOTSUP
}

View File

@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) {
func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) {
return ioctlPtrRet(fd, req, unsafe.Pointer(s))
}
// Ucred Helpers
// See ucred(3c) and getpeerucred(3c)
//sys getpeerucred(fd uintptr, ucred *uintptr) (err error)
//sys ucredFree(ucred uintptr) = ucred_free
//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get
//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid
//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid
//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid
//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid
//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid
//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid
//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid
// Ucred is an opaque struct that holds user credentials.
type Ucred struct {
ucred uintptr
}
// We need to ensure that ucredFree is called on the underlying ucred
// when the Ucred is garbage collected.
func ucredFinalizer(u *Ucred) {
ucredFree(u.ucred)
}
func GetPeerUcred(fd uintptr) (*Ucred, error) {
var ucred uintptr
err := getpeerucred(fd, &ucred)
if err != nil {
return nil, err
}
result := &Ucred{
ucred: ucred,
}
// set the finalizer on the result so that the ucred will be freed
runtime.SetFinalizer(result, ucredFinalizer)
return result, nil
}
func UcredGet(pid int) (*Ucred, error) {
ucred, err := ucredGet(pid)
if err != nil {
return nil, err
}
result := &Ucred{
ucred: ucred,
}
// set the finalizer on the result so that the ucred will be freed
runtime.SetFinalizer(result, ucredFinalizer)
return result, nil
}
func (u *Ucred) Geteuid() int {
defer runtime.KeepAlive(u)
return ucredGeteuid(u.ucred)
}
func (u *Ucred) Getruid() int {
defer runtime.KeepAlive(u)
return ucredGetruid(u.ucred)
}
func (u *Ucred) Getsuid() int {
defer runtime.KeepAlive(u)
return ucredGetsuid(u.ucred)
}
func (u *Ucred) Getegid() int {
defer runtime.KeepAlive(u)
return ucredGetegid(u.ucred)
}
func (u *Ucred) Getrgid() int {
defer runtime.KeepAlive(u)
return ucredGetrgid(u.ucred)
}
func (u *Ucred) Getsgid() int {
defer runtime.KeepAlive(u)
return ucredGetsgid(u.ucred)
}
func (u *Ucred) Getpid() int {
defer runtime.KeepAlive(u)
return ucredGetpid(u.ucred)
}
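A hedged sketch of the new Ucred helpers; it only builds on Solaris/illumos, where syscall_solaris.go defines these wrappers:

package main

import (
	"fmt"
	"log"
	"os"

	"golang.org/x/sys/unix"
)

func main() {
	u, err := unix.UcredGet(os.Getpid())
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("euid:", u.Geteuid(), "rgid:", u.Getrgid(), "pid:", u.Getpid())
	// The underlying ucred_t is released by the finalizer once u becomes unreachable.
}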

View File

@ -1245,6 +1245,7 @@ const (
FAN_REPORT_DFID_NAME = 0xc00
FAN_REPORT_DFID_NAME_TARGET = 0x1e00
FAN_REPORT_DIR_FID = 0x400
FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200
FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80
@ -1330,8 +1331,10 @@ const (
FUSE_SUPER_MAGIC = 0x65735546
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
F_CREATED_QUERY = 0x404
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406
F_DUPFD_QUERY = 0x403
F_EXLCK = 0x4
F_GETFD = 0x1
F_GETFL = 0x3
@ -1551,6 +1554,7 @@ const (
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_SCTP = 0x84
IPPROTO_SMC = 0x100
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
@ -1623,6 +1627,8 @@ const (
IPV6_UNICAST_IF = 0x4c
IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a
IPV6_VERSION = 0x60
IPV6_VERSION_MASK = 0xf0
IPV6_XFRM_POLICY = 0x23
IP_ADD_MEMBERSHIP = 0x23
IP_ADD_SOURCE_MEMBERSHIP = 0x27
@ -1867,6 +1873,7 @@ const (
MADV_UNMERGEABLE = 0xd
MADV_WILLNEED = 0x3
MADV_WIPEONFORK = 0x12
MAP_DROPPABLE = 0x8
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_FIXED_NOREPLACE = 0x100000
@ -1967,6 +1974,7 @@ const (
MSG_PEEK = 0x2
MSG_PROXY = 0x10
MSG_RST = 0x1000
MSG_SOCK_DEVMEM = 0x2000000
MSG_SYN = 0x400
MSG_TRUNC = 0x20
MSG_TRYHARD = 0x4
@ -2083,6 +2091,7 @@ const (
NFC_ATR_REQ_MAXSIZE = 0x40
NFC_ATR_RES_GB_MAXSIZE = 0x2f
NFC_ATR_RES_MAXSIZE = 0x40
NFC_ATS_MAXSIZE = 0x14
NFC_COMM_ACTIVE = 0x0
NFC_COMM_PASSIVE = 0x1
NFC_DEVICE_NAME_MAXSIZE = 0x8
@ -2163,6 +2172,7 @@ const (
NFNL_SUBSYS_QUEUE = 0x3
NFNL_SUBSYS_ULOG = 0x4
NFS_SUPER_MAGIC = 0x6969
NFT_BITWISE_BOOL = 0x0
NFT_CHAIN_FLAGS = 0x7
NFT_CHAIN_MAXNAMELEN = 0x100
NFT_CT_MAX = 0x17
@ -2491,6 +2501,7 @@ const (
PR_GET_PDEATHSIG = 0x2
PR_GET_SECCOMP = 0x15
PR_GET_SECUREBITS = 0x1b
PR_GET_SHADOW_STACK_STATUS = 0x4a
PR_GET_SPECULATION_CTRL = 0x34
PR_GET_TAGGED_ADDR_CTRL = 0x38
PR_GET_THP_DISABLE = 0x2a
@ -2499,6 +2510,7 @@ const (
PR_GET_TIMING = 0xd
PR_GET_TSC = 0x19
PR_GET_UNALIGN = 0x5
PR_LOCK_SHADOW_STACK_STATUS = 0x4c
PR_MCE_KILL = 0x21
PR_MCE_KILL_CLEAR = 0x0
PR_MCE_KILL_DEFAULT = 0x2
@ -2525,6 +2537,8 @@ const (
PR_PAC_GET_ENABLED_KEYS = 0x3d
PR_PAC_RESET_KEYS = 0x36
PR_PAC_SET_ENABLED_KEYS = 0x3c
PR_PMLEN_MASK = 0x7f000000
PR_PMLEN_SHIFT = 0x18
PR_PPC_DEXCR_CTRL_CLEAR = 0x4
PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10
PR_PPC_DEXCR_CTRL_EDITABLE = 0x1
@ -2592,6 +2606,7 @@ const (
PR_SET_PTRACER = 0x59616d61
PR_SET_SECCOMP = 0x16
PR_SET_SECUREBITS = 0x1c
PR_SET_SHADOW_STACK_STATUS = 0x4b
PR_SET_SPECULATION_CTRL = 0x35
PR_SET_SYSCALL_USER_DISPATCH = 0x3b
PR_SET_TAGGED_ADDR_CTRL = 0x37
@ -2602,6 +2617,9 @@ const (
PR_SET_UNALIGN = 0x6
PR_SET_VMA = 0x53564d41
PR_SET_VMA_ANON_NAME = 0x0
PR_SHADOW_STACK_ENABLE = 0x1
PR_SHADOW_STACK_PUSH = 0x4
PR_SHADOW_STACK_WRITE = 0x2
PR_SME_GET_VL = 0x40
PR_SME_SET_VL = 0x3f
PR_SME_SET_VL_ONEXEC = 0x40000
@ -2911,7 +2929,6 @@ const (
RTM_NEWNEXTHOP = 0x68
RTM_NEWNEXTHOPBUCKET = 0x74
RTM_NEWNSID = 0x58
RTM_NEWNVLAN = 0x70
RTM_NEWPREFIX = 0x34
RTM_NEWQDISC = 0x24
RTM_NEWROUTE = 0x18
@ -2920,6 +2937,7 @@ const (
RTM_NEWTCLASS = 0x28
RTM_NEWTFILTER = 0x2c
RTM_NEWTUNNEL = 0x78
RTM_NEWVLAN = 0x70
RTM_NR_FAMILIES = 0x1b
RTM_NR_MSGTYPES = 0x6c
RTM_SETDCB = 0x4f

View File

@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -304,6 +306,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103

View File

@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -305,6 +307,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -310,6 +312,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103

View File

@ -109,6 +109,7 @@ const (
F_SETOWN = 0x8
F_UNLCK = 0x2
F_WRLCK = 0x1
GCS_MAGIC = 0x47435300
HIDIOCGRAWINFO = 0x80084803
HIDIOCGRDESC = 0x90044802
HIDIOCGRDESCSIZE = 0x80044801
@ -119,6 +120,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -302,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103

View File

@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -297,6 +299,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@ -358,6 +360,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xffffff0f
IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -294,6 +296,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103

View File

@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -366,6 +368,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103

View File

@ -119,6 +119,8 @@ const (
IN_CLOEXEC = 0x400000
IN_NONBLOCK = 0x4000
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
IPV6_FLOWINFO_MASK = 0xfffffff
IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@ -357,6 +359,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x38
SCM_TIMESTAMPING_PKTINFO = 0x3c
SCM_TIMESTAMPNS = 0x21
SCM_TS_OPT_ID = 0x5a
SCM_TXTIME = 0x3f
SCM_WIFI_STATUS = 0x25
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103

View File

@ -141,6 +141,16 @@ import (
//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so"
//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so"
//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so"
//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so"
//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so"
//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so"
//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so"
//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so"
//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so"
//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so"
//go:cgo_import_dynamic libc_port_create port_create "libc.so"
//go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
@ -280,6 +290,16 @@ import (
//go:linkname procgetpeername libc_getpeername
//go:linkname procsetsockopt libc_setsockopt
//go:linkname procrecvfrom libc_recvfrom
//go:linkname procgetpeerucred libc_getpeerucred
//go:linkname procucred_get libc_ucred_get
//go:linkname procucred_geteuid libc_ucred_geteuid
//go:linkname procucred_getegid libc_ucred_getegid
//go:linkname procucred_getruid libc_ucred_getruid
//go:linkname procucred_getrgid libc_ucred_getrgid
//go:linkname procucred_getsuid libc_ucred_getsuid
//go:linkname procucred_getsgid libc_ucred_getsgid
//go:linkname procucred_getpid libc_ucred_getpid
//go:linkname procucred_free libc_ucred_free
//go:linkname procport_create libc_port_create
//go:linkname procport_associate libc_port_associate
//go:linkname procport_dissociate libc_port_dissociate
@ -420,6 +440,16 @@ var (
procgetpeername,
procsetsockopt,
procrecvfrom,
procgetpeerucred,
procucred_get,
procucred_geteuid,
procucred_getegid,
procucred_getruid,
procucred_getrgid,
procucred_getsuid,
procucred_getsgid,
procucred_getpid,
procucred_free,
procport_create,
procport_associate,
procport_dissociate,
@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func getpeerucred(fd uintptr, ucred *uintptr) (err error) {
_, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGet(pid int) (ucred uintptr, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0)
ucred = uintptr(r0)
if e1 != 0 {
err = errnoErr(e1)
}
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGeteuid(ucred uintptr) (uid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetegid(ucred uintptr) (gid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetruid(ucred uintptr) (uid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetrgid(ucred uintptr) (gid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetsuid(ucred uintptr) (uid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
uid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetsgid(ucred uintptr) (gid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
gid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredGetpid(ucred uintptr) (pid int) {
r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
pid = int(r0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func ucredFree(ucred uintptr) {
sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
return
}
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
func port_create() (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0)
n = int(r0)

View File

@ -458,4 +458,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -381,4 +381,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -422,4 +422,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -325,4 +325,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -321,4 +321,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
SYS_SETXATTRAT = 4463
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
)

View File

@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
SYS_SETXATTRAT = 5463
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
)

View File

@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
SYS_SETXATTRAT = 5463
SYS_GETXATTRAT = 5464
SYS_LISTXATTRAT = 5465
SYS_REMOVEXATTRAT = 5466
)

View File

@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
SYS_SETXATTRAT = 4463
SYS_GETXATTRAT = 4464
SYS_LISTXATTRAT = 4465
SYS_REMOVEXATTRAT = 4466
)

View File

@ -449,4 +449,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -326,4 +326,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -387,4 +387,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -400,4 +400,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
SYS_SETXATTRAT = 463
SYS_GETXATTRAT = 464
SYS_LISTXATTRAT = 465
SYS_REMOVEXATTRAT = 466
)

View File

@ -4747,7 +4747,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
NL80211_ATTR_MAX = 0x14c
NL80211_ATTR_MAX = 0x14d
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@ -5519,7 +5519,7 @@ const (
NL80211_MNTR_FLAG_CONTROL = 0x3
NL80211_MNTR_FLAG_COOK_FRAMES = 0x5
NL80211_MNTR_FLAG_FCSFAIL = 0x1
NL80211_MNTR_FLAG_MAX = 0x6
NL80211_MNTR_FLAG_MAX = 0x7
NL80211_MNTR_FLAG_OTHER_BSS = 0x4
NL80211_MNTR_FLAG_PLCPFAIL = 0x2
NL80211_MPATH_FLAG_ACTIVE = 0x1
@ -6174,3 +6174,5 @@ type SockDiagReq struct {
Family uint8
Protocol uint8
}
const RTM_NEWNVLAN = 0x70

View File

@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) {
if changed {
tt.RemakeString()
}
return makeTag(tt), err
return makeTag(tt), nil
}
// Compose creates a Tag from individual parts, which may be of type Tag, Base,

View File

@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int {
// TokensAt returns the number of tokens available at time t.
func (lim *Limiter) TokensAt(t time.Time) float64 {
lim.mu.Lock()
_, tokens := lim.advance(t) // does not mutate lim
tokens := lim.advance(t) // does not mutate lim
lim.mu.Unlock()
return tokens
}
@ -186,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) {
return
}
// advance time to now
t, tokens := r.lim.advance(t)
tokens := r.lim.advance(t)
// calculate new number of tokens
tokens += restoreTokens
if burst := float64(r.lim.burst); tokens > burst {
@ -307,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) {
lim.mu.Lock()
defer lim.mu.Unlock()
t, tokens := lim.advance(t)
tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@ -324,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) {
lim.mu.Lock()
defer lim.mu.Unlock()
t, tokens := lim.advance(t)
tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@ -347,7 +347,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
}
}
t, tokens := lim.advance(t)
tokens := lim.advance(t)
// Calculate the remaining number of tokens resulting from the request.
tokens -= float64(n)
@ -380,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
return r
}
// advance calculates and returns an updated state for lim resulting from the passage of time.
// advance calculates and returns an updated number of tokens for lim
// resulting from the passage of time.
// lim is not changed.
// advance requires that lim.mu is held.
func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
func (lim *Limiter) advance(t time.Time) (newTokens float64) {
last := lim.last
if t.Before(last) {
last = t
@ -396,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
if burst := float64(lim.burst); tokens > burst {
tokens = burst
}
return t, tokens
return tokens
}
// durationFromTokens is a unit conversion function from the number of tokens to the duration
@ -405,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration {
if limit <= 0 {
return InfDuration
}
seconds := tokens / float64(limit)
return time.Duration(float64(time.Second) * seconds)
duration := (tokens / float64(limit)) * float64(time.Second)
// Cap the duration to the maximum representable int64 value, to avoid overflow.
if duration > float64(math.MaxInt64) {
return InfDuration
}
return time.Duration(duration)
}
// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
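A worked illustration of the overflow guard added to durationFromTokens; the numbers are invented to trip the cap, and the arithmetic is repeated inline because the method itself is unexported:

package main

import (
	"fmt"
	"math"
	"time"
)

func main() {
	// One token owed at a refill rate of 1e-10 tokens/second means a wait of
	// 1e10 seconds = 1e19 ns, which exceeds math.MaxInt64 (~9.22e18 ns), so the
	// patched durationFromTokens returns InfDuration instead of overflowing.
	tokens, limit := 1.0, 1e-10
	duration := (tokens / limit) * float64(time.Second)
	fmt.Println(duration > float64(math.MaxInt64)) // true
}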

27
vendor/golang.org/x/tools/LICENSE generated vendored
View File

@ -1,27 +0,0 @@
Copyright 2009 The Go Authors.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google LLC nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

22
vendor/golang.org/x/tools/PATENTS generated vendored
View File

@ -1,22 +0,0 @@
Additional IP Rights Grant (Patents)
"This implementation" means the copyrightable works distributed by
Google as part of the Go project.
Google hereby grants to You a perpetual, worldwide, non-exclusive,
no-charge, royalty-free, irrevocable (except as stated in this section)
patent license to make, have made, use, offer to sell, sell, import,
transfer and otherwise run, modify and propagate the contents of this
implementation of Go, where such license applies only to those patent
claims, both currently owned or controlled by Google and acquired in
the future, licensable by Google that are necessarily infringed by this
implementation of Go. This grant does not include claims that would be
infringed only as a consequence of further modification of this
implementation. If you or your agent or exclusive licensee institute or
order or agree to the institution of patent litigation against any
entity (including a cross-claim or counterclaim in a lawsuit) alleging
that this implementation of Go or any code incorporated within this
implementation of Go constitutes direct or contributory patent
infringement, or inducement of patent infringement, then any patent
rights granted to you under this License for this implementation of Go
shall terminate as of the date such litigation is filed.

View File

@ -1,239 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package gcexportdata provides functions for reading and writing
// export data, which is a serialized description of the API of a Go
// package including the names, kinds, types, and locations of all
// exported declarations.
//
// The standard Go compiler (cmd/compile) writes an export data file
// for each package it compiles, which it later reads when compiling
// packages that import the earlier one. The compiler must thus
// contain logic to both write and read export data.
// (See the "Export" section in the cmd/compile/README file.)
//
// The [Read] function in this package can read files produced by the
// compiler, producing [go/types] data structures. As a matter of
// policy, Read supports export data files produced by only the last
// two Go releases plus tip; see https://go.dev/issue/68898. The
// export data files produced by the compiler contain additional
// details related to generics, inlining, and other optimizations that
// cannot be decoded by the [Read] function.
//
// In files written by the compiler, the export data is not at the
// start of the file. Before calling Read, use [NewReader] to locate
// the desired portion of the file.
//
// The [Write] function in this package encodes the exported API of a
// Go package ([types.Package]) as a file. Such files can be later
// decoded by Read, but cannot be consumed by the compiler.
//
// # Future changes
//
// Although Read supports the formats written by both Write and the
// compiler, the two are quite different, and there is an open
// proposal (https://go.dev/issue/69491) to separate these APIs.
//
// Under that proposal, this package would ultimately provide only the
// Read operation for compiler export data, which must be defined in
// this module (golang.org/x/tools), not in the standard library, to
// avoid version skew for developer tools that need to read compiler
// export data both before and after a Go release, such as from Go
// 1.23 to Go 1.24. Because this package lives in the tools module,
// clients can update their version of the module some time before the
// Go 1.24 release and rebuild and redeploy their tools, which will
// then be able to consume both Go 1.23 and Go 1.24 export data files,
// so they will work before and after the Go update. (See discussion
// at https://go.dev/issue/15651.)
//
// The operations to import and export [go/types] data structures
// would be defined in the go/types package as Import and Export.
// [Write] would (eventually) delegate to Export,
// and [Read], when it detects a file produced by Export,
// would delegate to Import.
//
// # Deprecations
//
// The [NewImporter] and [Find] functions are deprecated and should
// not be used in new code. The [WriteBundle] and [ReadBundle]
// functions are experimental, and there is an open proposal to
// deprecate them (https://go.dev/issue/69573).
package gcexportdata
import (
"bufio"
"bytes"
"encoding/json"
"fmt"
"go/token"
"go/types"
"io"
"os/exec"
"golang.org/x/tools/internal/gcimporter"
)
// Find returns the name of an object (.o) or archive (.a) file
// containing type information for the specified import path,
// using the go command.
// If no file was found, an empty filename is returned.
//
// A relative srcDir is interpreted relative to the current working directory.
//
// Find also returns the package's resolved (canonical) import path,
// reflecting the effects of srcDir and vendoring on importPath.
//
// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
// which is more efficient.
func Find(importPath, srcDir string) (filename, path string) {
cmd := exec.Command("go", "list", "-json", "-export", "--", importPath)
cmd.Dir = srcDir
out, err := cmd.Output()
if err != nil {
return "", ""
}
var data struct {
ImportPath string
Export string
}
json.Unmarshal(out, &data)
return data.Export, data.ImportPath
}
// NewReader returns a reader for the export data section of an object
// (.o) or archive (.a) file read from r. The new reader may provide
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
buf := bufio.NewReader(r)
size, err := gcimporter.FindExportData(buf)
if err != nil {
return nil, err
}
// We were given an archive and found the __.PKGDEF in it.
// This tells us the size of the export data, and we don't
// need to return the entire file.
return &io.LimitedReader{
R: buf,
N: size,
}, nil
}
// readAll works the same way as io.ReadAll, but avoids allocations and copies
// by preallocating a byte slice of the necessary size if the size is known up
// front. This is always possible when the input is an archive. In that case,
// NewReader will return the known size using an io.LimitedReader.
func readAll(r io.Reader) ([]byte, error) {
if lr, ok := r.(*io.LimitedReader); ok {
data := make([]byte, lr.N)
_, err := io.ReadFull(lr, data)
return data, err
}
return io.ReadAll(r)
}
// Read reads export data from in, decodes it, and returns type
// information for the package.
//
// Read is capable of reading export data produced by [Write] at the
// same source code version, or by the last two Go releases (plus tip)
// of the standard Go compiler. Reading files from older compilers may
// produce an error.
//
// The package path (effectively its linker symbol prefix) is
// specified by path, since unlike the package name, this information
// may not be recorded in the export data.
//
// File position information is added to fset.
//
// Read may inspect and add to the imports map to ensure that references
// within the export data to other packages are consistent. The caller
// must ensure that imports[path] does not exist, or exists but is
// incomplete (see types.Package.Complete), and Read inserts the
// resulting package into this map entry.
//
// On return, the state of the reader is undefined.
func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package, path string) (*types.Package, error) {
data, err := readAll(in)
if err != nil {
return nil, fmt.Errorf("reading export data for %q: %v", path, err)
}
if bytes.HasPrefix(data, []byte("!<arch>")) {
return nil, fmt.Errorf("can't read export data for %q directly from an archive file (call gcexportdata.NewReader first to extract export data)", path)
}
// The indexed export format starts with an 'i'; the older
// binary export format starts with a 'c', 'd', or 'v'
// (from "version"). Select appropriate importer.
if len(data) > 0 {
switch data[0] {
case 'v', 'c', 'd':
// binary, produced by cmd/compile till go1.10
return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
case 'i':
// indexed, produced by cmd/compile till go1.19,
// and also by [Write].
//
// If proposal #69491 is accepted, go/types
// serialization will be implemented by
// types.Export, to which Write would eventually
// delegate (explicitly dropping any pretence at
// inter-version Write-Read compatibility).
// This [Read] function would delegate to types.Import
// when it detects that the file was produced by Export.
_, pkg, err := gcimporter.IImportData(fset, imports, data[1:], path)
return pkg, err
case 'u':
// unified, produced by cmd/compile since go1.20
_, pkg, err := gcimporter.UImportData(fset, imports, data[1:], path)
return pkg, err
default:
l := len(data)
if l > 10 {
l = 10
}
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path)
}
}
return nil, fmt.Errorf("empty export data for %s", path)
}
// Write writes encoded type information for the specified package to out.
// The FileSet provides file position information for named objects.
func Write(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
if _, err := io.WriteString(out, "i"); err != nil {
return err
}
return gcimporter.IExportData(out, fset, pkg)
}
// ReadBundle reads an export bundle from in, decodes it, and returns type
// information for the packages.
// File position information is added to fset.
//
// ReadBundle may inspect and add to the imports map to ensure that references
// within the export bundle to other packages are consistent.
//
// On return, the state of the reader is undefined.
//
// Experimental: This API is experimental and may change in the future.
func ReadBundle(in io.Reader, fset *token.FileSet, imports map[string]*types.Package) ([]*types.Package, error) {
data, err := readAll(in)
if err != nil {
return nil, fmt.Errorf("reading export bundle: %v", err)
}
return gcimporter.IImportBundle(fset, imports, data)
}
// WriteBundle writes encoded type information for the specified packages to out.
// The FileSet provides file position information for named objects.
//
// Experimental: This API is experimental and may change in the future.
func WriteBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
return gcimporter.IExportBundle(out, fset, pkgs)
}

View File

@ -1,75 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gcexportdata
import (
"fmt"
"go/token"
"go/types"
"os"
)
// NewImporter returns a new instance of the types.Importer interface
// that reads type information from export data files written by gc.
// The Importer also satisfies types.ImporterFrom.
//
// Export data files are located using "go build" workspace conventions
// and the build.Default context.
//
// Use this importer instead of go/importer.For("gc", ...) to avoid the
// version-skew problems described in the documentation of this package,
// or to control the FileSet or access the imports map populated during
// package loading.
//
// Deprecated: Use the higher-level API in golang.org/x/tools/go/packages,
// which is more efficient.
func NewImporter(fset *token.FileSet, imports map[string]*types.Package) types.ImporterFrom {
return importer{fset, imports}
}
type importer struct {
fset *token.FileSet
imports map[string]*types.Package
}
func (imp importer) Import(importPath string) (*types.Package, error) {
return imp.ImportFrom(importPath, "", 0)
}
func (imp importer) ImportFrom(importPath, srcDir string, mode types.ImportMode) (_ *types.Package, err error) {
filename, path := Find(importPath, srcDir)
if filename == "" {
if importPath == "unsafe" {
// Even for unsafe, call Find first in case
// the package was vendored.
return types.Unsafe, nil
}
return nil, fmt.Errorf("can't find import: %s", importPath)
}
if pkg, ok := imp.imports[path]; ok && pkg.Complete() {
return pkg, nil // cache hit
}
// open file
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
f.Close()
if err != nil {
// add file name to error
err = fmt.Errorf("reading export data: %s: %v", filename, err)
}
}()
r, err := NewReader(f)
if err != nil {
return nil, err
}
return Read(r, imp.fset, imp.imports, path)
}

View File

@ -1,251 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
/*
Package packages loads Go packages for inspection and analysis.
The [Load] function takes as input a list of patterns and returns a
list of [Package] values describing individual packages matched by those
patterns.
A [Config] specifies configuration options, the most important of which is
the [LoadMode], which controls the amount of detail in the loaded packages.
Load passes most patterns directly to the underlying build tool.
The default build tool is the go command.
Its supported patterns are described at
https://pkg.go.dev/cmd/go#hdr-Package_lists_and_patterns.
Other build systems may be supported by providing a "driver";
see [The driver protocol].
All patterns with the prefix "query=", where query is a
non-empty string of letters from [a-z], are reserved and may be
interpreted as query operators.
Two query operators are currently supported: "file" and "pattern".
The query "file=path/to/file.go" matches the package or packages enclosing
the Go source file path/to/file.go. For example "file=~/go/src/fmt/print.go"
might return the packages "fmt" and "fmt [fmt.test]".
The query "pattern=string" causes "string" to be passed directly to
the underlying build tool. In most cases this is unnecessary,
but an application can use Load("pattern=" + x) as an escaping mechanism
to ensure that x is not interpreted as a query operator if it contains '='.
All other query operators are reserved for future use and currently
cause Load to report an error.
The Package struct provides basic information about the package, including
- ID, a unique identifier for the package in the returned set;
- GoFiles, the names of the package's Go source files;
- Imports, a map from source import strings to the Packages they name;
- Types, the type information for the package's exported symbols;
- Syntax, the parsed syntax trees for the package's source code; and
- TypesInfo, the result of a complete type-check of the package syntax trees.
(See the documentation for type Package for the complete list of fields
and more detailed descriptions.)
For example,
Load(nil, "bytes", "unicode...")
returns four Package structs describing the standard library packages
bytes, unicode, unicode/utf16, and unicode/utf8. Note that one pattern
can match multiple packages and that a package might be matched by
multiple patterns: in general it is not possible to determine which
packages correspond to which patterns.
Note that the list returned by Load contains only the packages matched
by the patterns. Their dependencies can be found by walking the import
graph using the Imports fields.
The Load function can be configured by passing a pointer to a Config as
the first argument. A nil Config is equivalent to the zero Config, which
causes Load to run in [LoadFiles] mode, collecting minimal information.
See the documentation for type Config for details.
As noted earlier, the Config.Mode controls the amount of detail
reported about the loaded packages. See the documentation for type LoadMode
for details.
Most tools should pass their command-line arguments (after any flags)
uninterpreted to Load, so that it can interpret them
according to the conventions of the underlying build system.
See the Example function for typical usage.
# The driver protocol
Load may be used to load Go packages even in Go projects that use
alternative build systems, by installing an appropriate "driver"
program for the build system and specifying its location in the
GOPACKAGESDRIVER environment variable.
For example,
https://github.com/bazelbuild/rules_go/wiki/Editor-and-tool-integration
explains how to use the driver for Bazel.
The driver program is responsible for interpreting patterns in its
preferred notation and reporting information about the packages that
those patterns identify. Drivers must also support the special "file="
and "pattern=" patterns described above.
The patterns are provided as positional command-line arguments. A
JSON-encoded [DriverRequest] message providing additional information
is written to the driver's standard input. The driver must write a
JSON-encoded [DriverResponse] message to its standard output. (This
message differs from the JSON schema produced by 'go list'.)
The value of the PWD environment variable seen by the driver process
is the preferred name of its working directory. (The working directory
may have other aliases due to symbolic links; see the comment on the
Dir field of [exec.Cmd] for related information.)
When the driver process emits in its response the name of a file
that is a descendant of this directory, it must use an absolute path
that has the value of PWD as a prefix, to ensure that the returned
filenames satisfy the original query.
*/
package packages // import "golang.org/x/tools/go/packages"
/*
Motivation and design considerations
The new package's design solves problems addressed by two existing
packages: go/build, which locates and describes packages, and
golang.org/x/tools/go/loader, which loads, parses and type-checks them.
The go/build.Package structure encodes too much of the 'go build' way
of organizing projects, leaving us in need of a data type that describes a
package of Go source code independent of the underlying build system.
We wanted something that works equally well with go build and vgo, and
also other build systems such as Bazel and Blaze, making it possible to
construct analysis tools that work in all these environments.
Tools such as errcheck and staticcheck were essentially unavailable to
the Go community at Google, and some of Google's internal tools for Go
are unavailable externally.
This new package provides a uniform way to obtain package metadata by
querying each of these build systems, optionally supporting their
preferred command-line notations for packages, so that tools integrate
neatly with users' build environments. The Metadata query function
executes an external query tool appropriate to the current workspace.
Loading packages always returns the complete import graph "all the way down",
even if all you want is information about a single package, because the query
mechanisms of all the build systems we currently support ({go,vgo} list, and
blaze/bazel aspect-based query) cannot provide detailed information
about one package without visiting all its dependencies too, so there is
no additional asymptotic cost to providing transitive information.
(This property might not be true of a hypothetical 5th build system.)
In calls to TypeCheck, all initial packages, and any package that
transitively depends on one of them, must be loaded from source.
Consider A->B->C->D->E: if A,C are initial, A,B,C must be loaded from
source; D may be loaded from export data, and E may not be loaded at all
(though it's possible that D's export data mentions it, so a
types.Package may be created for it and exposed.)
The old loader had a feature to suppress type-checking of function
bodies on a per-package basis, primarily intended to reduce the work of
obtaining type information for imported packages. Now that imports are
satisfied by export data, the optimization no longer seems necessary.
Despite some early attempts, the old loader did not exploit export data,
instead always using the equivalent of WholeProgram mode. This was due
to the complexity of mixing source and export data packages (now
resolved by the upward traversal mentioned above), and because export data
files were nearly always missing or stale. Now that 'go build' supports
caching, all the underlying build systems can guarantee to produce
export data in a reasonable (amortized) time.
Test "main" packages synthesized by the build system are now reported as
first-class packages, avoiding the need for clients (such as go/ssa) to
reinvent this generation logic.
One way in which go/packages is simpler than the old loader is in its
treatment of in-package tests. In-package tests are packages that
consist of all the files of the library under test, plus the test files.
The old loader constructed in-package tests by a two-phase process of
mutation called "augmentation": first it would construct and type check
all the ordinary library packages and type-check the packages that
depend on them; then it would add more (test) files to the package and
type-check again. This two-phase approach had four major problems:
1) in processing the tests, the loader modified the library package,
leaving no way for a client application to see both the test
package and the library package; one would mutate into the other.
2) because test files can declare additional methods on types defined in
the library portion of the package, the dispatch of method calls in
the library portion was affected by the presence of the test files.
This should have been a clue that the packages were logically
different.
3) this model of "augmentation" assumed at most one in-package test
per library package, which is true of projects using 'go build',
but not other build systems.
4) because of the two-phase nature of test processing, all packages that
import the library package had to be processed before augmentation,
forcing a "one-shot" API and preventing the client from calling Load
several times in sequence as is now possible in WholeProgram mode.
(TypeCheck mode has a similar one-shot restriction for a different reason.)
Early drafts of this package supported "multi-shot" operation.
Although it allowed clients to make a sequence of calls (or concurrent
calls) to Load, building up the graph of Packages incrementally,
it was of marginal value: it complicated the API
(since it allowed some options to vary across calls but not others),
it complicated the implementation,
it cannot be made to work in Types mode, as explained above,
and it was less efficient than making one combined call (when this is possible).
Among the clients we have inspected, none made multiple calls to load
but could not be easily and satisfactorily modified to make only a single call.
However, application changes may be required.
For example, the ssadump command loads the user-specified packages
and in addition the runtime package. It is tempting to simply append
"runtime" to the user-provided list, but that does not work if the user
specified an ad-hoc package such as [a.go b.go].
Instead, ssadump no longer requests the runtime package,
but seeks it among the dependencies of the user-specified packages,
and emits an error if it is not found.
Questions & Tasks
- Add GOARCH/GOOS?
They are not portable concepts, but could be made portable.
Our goal has been to allow users to express themselves using the conventions
of the underlying build system: if the build system honors GOARCH
during a build and during a metadata query, then so should
applications built atop that query mechanism.
Conversely, if the target architecture of the build is determined by
command-line flags, the application can pass the relevant
flags through to the build system using a command such as:
myapp -query_flag="--cpu=amd64" -query_flag="--os=darwin"
However, this approach is low-level, unwieldy, and non-portable.
GOOS and GOARCH seem important enough to warrant a dedicated option.
- How should we handle partial failures such as a mixture of good and
malformed patterns, existing and non-existent packages, successful and
failed builds, import failures, import cycles, and so on, in a call to
Load?
- Support bazel, blaze, and go1.10 list, not just go1.11 list.
- Handle (and test) various partial success cases, e.g.
a mixture of good packages and:
invalid patterns
nonexistent packages
empty packages
packages with malformed package or import declarations
unreadable files
import cycles
other parse errors
type errors
Make sure we record errors at the correct place in the graph.
- Missing packages among initial arguments are not reported.
Return bogus packages for them, like golist does.
- "undeclared name" errors (for example) are reported out of source file
order. I suspect this is due to the breadth-first resolution now used
by go/types. Is that a bug? Discuss with gri.
*/
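
To make the doc comment concrete, a minimal sketch (not part of the vendored file) of a typical Load call. The pattern "./..." assumes the program is run from inside a module; the LoadMode bits chosen here are only an example.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedImports | packages.NeedTypes,
	}
	pkgs, err := packages.Load(cfg, "./...")
	if err != nil {
		log.Fatal(err) // Load itself failed (bad pattern, driver error, ...)
	}
	if packages.PrintErrors(pkgs) > 0 {
		log.Fatal("packages contain errors")
	}
	for _, pkg := range pkgs {
		fmt.Println(pkg.ID, pkg.Name, len(pkg.GoFiles), "files")
	}
}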


@ -1,153 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packages
// This file defines the protocol that enables an external "driver"
// tool to supply package metadata in place of 'go list'.
import (
"bytes"
"encoding/json"
"fmt"
"os"
"os/exec"
"slices"
"strings"
)
// DriverRequest defines the schema of a request for package metadata
// from an external driver program. The JSON-encoded DriverRequest
// message is provided to the driver program's standard input. The
// query patterns are provided as command-line arguments.
//
// See the package documentation for an overview.
type DriverRequest struct {
Mode LoadMode `json:"mode"`
// Env specifies the environment the underlying build system should be run in.
Env []string `json:"env"`
// BuildFlags are flags that should be passed to the underlying build system.
BuildFlags []string `json:"build_flags"`
// Tests specifies whether the patterns should also return test packages.
Tests bool `json:"tests"`
// Overlay maps file paths (relative to the driver's working directory)
// to the contents of overlay files (see Config.Overlay).
Overlay map[string][]byte `json:"overlay"`
}
// DriverResponse defines the schema of a response from an external
// driver program, providing the results of a query for package
// metadata. The driver program must write a JSON-encoded
// DriverResponse message to its standard output.
//
// See the package documentation for an overview.
type DriverResponse struct {
// NotHandled is returned if the request can't be handled by the current
// driver. If an external driver returns a response with NotHandled, the
rest of the DriverResponse is ignored, and go/packages will fall back
// to the next driver. If go/packages is extended in the future to support
// lists of multiple drivers, go/packages will fall back to the next driver.
NotHandled bool
// Compiler and Arch are the arguments passed to types.SizesFor
// to get a types.Sizes to use when type checking.
Compiler string
Arch string
// Roots is the set of package IDs that make up the root packages.
// We have to encode this separately because when we encode a single package
// we cannot know if it is one of the roots as that requires knowledge of the
// graph it is part of.
Roots []string `json:",omitempty"`
// Packages is the full set of packages in the graph.
// The packages are not connected into a graph.
// The Imports, if populated, will be stubs that only have their ID set.
// Imports will be connected and then type and syntax information added in a
// later pass (see refine).
Packages []*Package
// GoVersion is the minor version number used by the driver
// (e.g. the go command on the PATH) when selecting .go files.
// Zero means unknown.
GoVersion int
}
// driver is the type for functions that query the build system for the
// packages named by the patterns.
type driver func(cfg *Config, patterns []string) (*DriverResponse, error)
// findExternalDriver returns the file path of a tool that supplies
// the build system package structure, or "" if not found.
// If GOPACKAGESDRIVER is set in the environment, findExternalDriver returns its
// value, otherwise it searches for a binary named gopackagesdriver on the PATH.
func findExternalDriver(cfg *Config) driver {
const toolPrefix = "GOPACKAGESDRIVER="
tool := ""
for _, env := range cfg.Env {
if val := strings.TrimPrefix(env, toolPrefix); val != env {
tool = val
}
}
if tool != "" && tool == "off" {
return nil
}
if tool == "" {
var err error
tool, err = exec.LookPath("gopackagesdriver")
if err != nil {
return nil
}
}
return func(cfg *Config, patterns []string) (*DriverResponse, error) {
req, err := json.Marshal(DriverRequest{
Mode: cfg.Mode,
Env: cfg.Env,
BuildFlags: cfg.BuildFlags,
Tests: cfg.Tests,
Overlay: cfg.Overlay,
})
if err != nil {
return nil, fmt.Errorf("failed to encode message to driver tool: %v", err)
}
buf := new(bytes.Buffer)
stderr := new(bytes.Buffer)
cmd := exec.CommandContext(cfg.Context, tool, patterns...)
cmd.Dir = cfg.Dir
// The cwd gets resolved to the real path. On Darwin, where
// /tmp is a symlink, this breaks anything that expects the
// working directory to keep the original path, including the
// go command when dealing with modules.
//
// os.Getwd in the stdlib has a special feature where if the
// cwd and the PWD are the same node then it trusts
// the PWD, so by setting it in the env for the child
// process we fix up all the paths returned by the go
// command.
//
// (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
cmd.Stdin = bytes.NewReader(req)
cmd.Stdout = buf
cmd.Stderr = stderr
if err := cmd.Run(); err != nil {
return nil, fmt.Errorf("%v: %v: %s", tool, err, cmd.Stderr)
}
if len(stderr.Bytes()) != 0 && os.Getenv("GOPACKAGESPRINTDRIVERERRORS") != "" {
fmt.Fprintf(os.Stderr, "%s stderr: <<%s>>\n", cmdDebugStr(cmd), stderr)
}
var response DriverResponse
if err := json.Unmarshal(buf.Bytes(), &response); err != nil {
return nil, err
}
return &response, nil
}
}
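
For illustration, a minimal external driver honoring the protocol above (not part of the vendored file). It decodes the JSON DriverRequest from stdin, ignores the patterns passed as arguments, and answers NotHandled so go/packages falls back to 'go list'; a real driver would translate the patterns into its build system's query language and fill in Roots and Packages.

package main

import (
	"encoding/json"
	"log"
	"os"

	"golang.org/x/tools/go/packages"
)

func main() {
	var req packages.DriverRequest
	if err := json.NewDecoder(os.Stdin).Decode(&req); err != nil {
		log.Fatalf("decoding DriverRequest: %v", err)
	}
	// os.Args[1:] carries the query patterns; this stub does not use them.
	resp := packages.DriverResponse{NotHandled: true}
	if err := json.NewEncoder(os.Stdout).Encode(&resp); err != nil {
		log.Fatalf("encoding DriverResponse: %v", err)
	}
}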

File diff suppressed because it is too large.


@ -1,83 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packages
import (
"encoding/json"
"path/filepath"
"golang.org/x/tools/internal/gocommand"
)
// determineRootDirs returns a mapping from absolute directories that could
// contain code to their corresponding import path prefixes.
func (state *golistState) determineRootDirs() (map[string]string, error) {
env, err := state.getEnv()
if err != nil {
return nil, err
}
if env["GOMOD"] != "" {
state.rootsOnce.Do(func() {
state.rootDirs, state.rootDirsError = state.determineRootDirsModules()
})
} else {
state.rootsOnce.Do(func() {
state.rootDirs, state.rootDirsError = state.determineRootDirsGOPATH()
})
}
return state.rootDirs, state.rootDirsError
}
func (state *golistState) determineRootDirsModules() (map[string]string, error) {
// List all of the modules--the first will be the directory for the main
// module. Any replaced modules will also need to be treated as roots.
// Editing files in the module cache isn't a great idea, so we don't
// plan to ever support that.
out, err := state.invokeGo("list", "-m", "-json", "all")
if err != nil {
// 'go list all' will fail if we're outside of a module and
// GO111MODULE=on. Try falling back without 'all'.
var innerErr error
out, innerErr = state.invokeGo("list", "-m", "-json")
if innerErr != nil {
return nil, err
}
}
roots := map[string]string{}
modules := map[string]string{}
var i int
for dec := json.NewDecoder(out); dec.More(); {
mod := new(gocommand.ModuleJSON)
if err := dec.Decode(mod); err != nil {
return nil, err
}
if mod.Dir != "" && mod.Path != "" {
// This is a valid module; add it to the map.
absDir, err := filepath.Abs(mod.Dir)
if err != nil {
return nil, err
}
modules[absDir] = mod.Path
// The first result is the main module.
if i == 0 || mod.Replace != nil && mod.Replace.Path != "" {
roots[absDir] = mod.Path
}
}
i++
}
return roots, nil
}
func (state *golistState) determineRootDirsGOPATH() (map[string]string, error) {
m := map[string]string{}
for _, dir := range filepath.SplitList(state.mustGetEnv()["GOPATH"]) {
absDir, err := filepath.Abs(dir)
if err != nil {
return nil, err
}
m[filepath.Join(absDir, "src")] = ""
}
return m, nil
}
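
The dec.More() loop above is the standard way to consume the stream of concatenated JSON objects that 'go list -m -json all' prints. Here is a standalone sketch (not part of the vendored file); it must be run inside a module, and the minimal moduleJSON struct is an assumption standing in for the internal gocommand.ModuleJSON.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"log"
	"os/exec"
)

// moduleJSON holds just the fields this sketch needs from 'go list -m -json'.
type moduleJSON struct {
	Path string
	Dir  string
	Main bool
}

func main() {
	out, err := exec.Command("go", "list", "-m", "-json", "all").Output()
	if err != nil {
		log.Fatal(err)
	}
	for dec := json.NewDecoder(bytes.NewReader(out)); dec.More(); {
		var mod moduleJSON
		if err := dec.Decode(&mod); err != nil {
			log.Fatal(err)
		}
		fmt.Println(mod.Path, "->", mod.Dir)
	}
}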


@ -1,56 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packages
import (
"fmt"
"strings"
)
var modes = [...]struct {
mode LoadMode
name string
}{
{NeedName, "NeedName"},
{NeedFiles, "NeedFiles"},
{NeedCompiledGoFiles, "NeedCompiledGoFiles"},
{NeedImports, "NeedImports"},
{NeedDeps, "NeedDeps"},
{NeedExportFile, "NeedExportFile"},
{NeedTypes, "NeedTypes"},
{NeedSyntax, "NeedSyntax"},
{NeedTypesInfo, "NeedTypesInfo"},
{NeedTypesSizes, "NeedTypesSizes"},
{NeedForTest, "NeedForTest"},
{NeedModule, "NeedModule"},
{NeedEmbedFiles, "NeedEmbedFiles"},
{NeedEmbedPatterns, "NeedEmbedPatterns"},
{NeedTarget, "NeedTarget"},
}
func (mode LoadMode) String() string {
if mode == 0 {
return "LoadMode(0)"
}
var out []string
// named bits
for _, item := range modes {
if (mode & item.mode) != 0 {
mode ^= item.mode
out = append(out, item.name)
}
}
// unnamed residue
if mode != 0 {
if out == nil {
return fmt.Sprintf("LoadMode(%#x)", int(mode))
}
out = append(out, fmt.Sprintf("%#x", int(mode)))
}
if len(out) == 1 {
return out[0]
}
return "(" + strings.Join(out, "|") + ")"
}
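
A tiny sketch (not part of the vendored file) of what the String method above yields; the comments show the expected output for each value.

package main

import (
	"fmt"

	"golang.org/x/tools/go/packages"
)

func main() {
	fmt.Println(packages.NeedName)                      // NeedName
	fmt.Println(packages.NeedName | packages.NeedFiles) // (NeedName|NeedFiles)
	fmt.Println(packages.LoadMode(0))                   // LoadMode(0)
}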

File diff suppressed because it is too large.


@ -1,68 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package packages
import (
"fmt"
"os"
"sort"
)
// Visit visits all the packages in the import graph whose roots are
// pkgs, calling the optional pre function the first time each package
// is encountered (preorder), and the optional post function after a
// package's dependencies have been visited (postorder).
// The boolean result of pre(pkg) determines whether
// the imports of package pkg are visited.
func Visit(pkgs []*Package, pre func(*Package) bool, post func(*Package)) {
seen := make(map[*Package]bool)
var visit func(*Package)
visit = func(pkg *Package) {
if !seen[pkg] {
seen[pkg] = true
if pre == nil || pre(pkg) {
paths := make([]string, 0, len(pkg.Imports))
for path := range pkg.Imports {
paths = append(paths, path)
}
sort.Strings(paths) // Imports is a map, this makes visit stable
for _, path := range paths {
visit(pkg.Imports[path])
}
}
if post != nil {
post(pkg)
}
}
}
for _, pkg := range pkgs {
visit(pkg)
}
}
// PrintErrors prints to os.Stderr the accumulated errors of all
// packages in the import graph rooted at pkgs, dependencies first.
// PrintErrors returns the number of errors printed.
func PrintErrors(pkgs []*Package) int {
var n int
errModules := make(map[*Module]bool)
Visit(pkgs, nil, func(pkg *Package) {
for _, err := range pkg.Errors {
fmt.Fprintln(os.Stderr, err)
n++
}
// Print pkg.Module.Error once if present.
mod := pkg.Module
if mod != nil && mod.Error != nil && !errModules[mod] {
errModules[mod] = true
fmt.Fprintln(os.Stderr, mod.Error.Err)
n++
}
})
return n
}
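
A short sketch (not part of the vendored file) showing Visit's postorder guarantee: load the standard "fmt" package and print its import graph with dependencies before dependents. It assumes a working go command.

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		Mode: packages.NeedName | packages.NeedImports | packages.NeedDeps,
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	packages.Visit(pkgs, nil, func(pkg *packages.Package) {
		fmt.Println(pkg.PkgPath) // each dependency is printed before its importers
	})
}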


@ -1,817 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package objectpath defines a naming scheme for types.Objects
// (that is, named entities in Go programs) relative to their enclosing
// package.
//
// Type-checker objects are canonical, so they are usually identified by
// their address in memory (a pointer), but a pointer has meaning only
// within one address space. By contrast, objectpath names allow the
// identity of an object to be sent from one program to another,
// establishing a correspondence between types.Object variables that are
// distinct but logically equivalent.
//
// A single object may have multiple paths. In this example,
//
// type A struct{ X int }
// type B A
//
// the field X has two paths due to its membership of both A and B.
// The For(obj) function always returns one of these paths, arbitrarily
// but consistently.
package objectpath
import (
"fmt"
"go/types"
"strconv"
"strings"
"golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/typesinternal"
)
// TODO(adonovan): think about generic aliases.
// A Path is an opaque name that identifies a types.Object
// relative to its package. Conceptually, the name consists of a
// sequence of destructuring operations applied to the package scope
// to obtain the original object.
// The name does not include the package itself.
type Path string
// Encoding
//
// An object path is a textual and (with training) human-readable encoding
// of a sequence of destructuring operators, starting from a types.Package.
// The sequences represent a path through the package/object/type graph.
// We classify these operators by their type:
//
// PO package->object Package.Scope.Lookup
// OT object->type Object.Type
// TT type->type Type.{Elem,Key,{,{,Recv}Type}Params,Results,Underlying,Rhs} [EKPRUTrCa]
// TO type->object Type.{At,Field,Method,Obj} [AFMO]
//
// All valid paths start with a package and end at an object
// and thus may be defined by the regular language:
//
// objectpath = PO (OT TT* TO)*
//
// The concrete encoding follows directly:
// - The only PO operator is Package.Scope.Lookup, which requires an identifier.
// - The only OT operator is Object.Type,
// which we encode as '.' because dot cannot appear in an identifier.
// - The TT operators are encoded as [EKPRUTrCa];
// two of these ({,Recv}TypeParams) require an integer operand,
// which is encoded as a string of decimal digits.
// - The TO operators are encoded as [AFMO];
// three of these (At,Field,Method) require an integer operand,
// which is encoded as a string of decimal digits.
// These indices are stable across different representations
// of the same package, even source and export data.
// The indices used are implementation specific and may not correspond to
// the argument to the go/types function.
//
// In the example below,
//
// package p
//
// type T interface {
// f() (a string, b struct{ X int })
// }
//
// field X has the path "T.UM0.RA1.F0",
// representing the following sequence of operations:
//
// p.Lookup("T") T
// .Type().Underlying().Method(0). f
// .Type().Results().At(1) b
// .Type().Field(0) X
//
// The encoding is not maximally compact---every R or P is
// followed by an A, for example---but this simplifies the
// encoder and decoder.
const (
// object->type operators
opType = '.' // .Type() (Object)
// type->type operators
opElem = 'E' // .Elem() (Pointer, Slice, Array, Chan, Map)
opKey = 'K' // .Key() (Map)
opParams = 'P' // .Params() (Signature)
opResults = 'R' // .Results() (Signature)
opUnderlying = 'U' // .Underlying() (Named)
opTypeParam = 'T' // .TypeParams.At(i) (Named, Signature)
opRecvTypeParam = 'r' // .RecvTypeParams.At(i) (Signature)
opConstraint = 'C' // .Constraint() (TypeParam)
opRhs = 'a' // .Rhs() (Alias)
// type->object operators
opAt = 'A' // .At(i) (Tuple)
opField = 'F' // .Field(i) (Struct)
opMethod = 'M' // .Method(i) (Named or Interface; not Struct: "promoted" names are ignored)
opObj = 'O' // .Obj() (Named, TypeParam)
)
// For is equivalent to new(Encoder).For(obj).
//
// It may be more efficient to reuse a single Encoder across several calls.
func For(obj types.Object) (Path, error) {
return new(Encoder).For(obj)
}
// An Encoder amortizes the cost of encoding the paths of multiple objects.
// The zero value of an Encoder is ready to use.
type Encoder struct {
scopeMemo map[*types.Scope][]types.Object // memoization of scopeObjects
}
// For returns the path to an object relative to its package,
// or an error if the object is not accessible from the package's Scope.
//
// The For function guarantees to return a path only for the following objects:
// - package-level types
// - exported package-level non-types
// - methods
// - parameter and result variables
// - struct fields
// These objects are sufficient to define the API of their package.
// The objects described by a package's export data are drawn from this set.
//
// The set of objects accessible from a package's Scope depends on
// whether the package was produced by type-checking syntax, or
// reading export data; the latter may have a smaller Scope since
// export data trims objects that are not reachable from an exported
// declaration. For example, the For function will return a path for
// an exported method of an unexported type that is not reachable
// from any public declaration; this path will cause the Object
// function to fail if called on a package loaded from export data.
// TODO(adonovan): is this a bug or feature? Should this package
// compute accessibility in the same way?
//
// For does not return a path for predeclared names, imported package
// names, local names, and unexported package-level names (except
// types).
//
// Example: given this definition,
//
// package p
//
// type T interface {
// f() (a string, b struct{ X int })
// }
//
// For(X) would return a path that denotes the following sequence of operations:
//
// p.Scope().Lookup("T") (TypeName T)
// .Type().Underlying().Method(0). (method Func f)
// .Type().Results().At(1) (field Var b)
// .Type().Field(0) (field Var X)
//
// where p is the package (*types.Package) to which X belongs.
func (enc *Encoder) For(obj types.Object) (Path, error) {
pkg := obj.Pkg()
// This table lists the cases of interest.
//
// Object Action
// ------ ------
// nil reject
// builtin reject
// pkgname reject
// label reject
// var
// package-level accept
// func param/result accept
// local reject
// struct field accept
// const
// package-level accept
// local reject
// func
// package-level accept
// init functions reject
// concrete method accept
// interface method accept
// type
// package-level accept
// local reject
//
// The only accessible package-level objects are members of pkg itself.
//
// The cases are handled in four steps:
//
// 1. reject nil and builtin
// 2. accept package-level objects
// 3. reject obviously invalid objects
// 4. search the API for the path to the param/result/field/method.
// 1. reference to nil or builtin?
if pkg == nil {
return "", fmt.Errorf("predeclared %s has no path", obj)
}
scope := pkg.Scope()
// 2. package-level object?
if scope.Lookup(obj.Name()) == obj {
// Only exported objects (and non-exported types) have a path.
// Non-exported types may be referenced by other objects.
if _, ok := obj.(*types.TypeName); !ok && !obj.Exported() {
return "", fmt.Errorf("no path for non-exported %v", obj)
}
return Path(obj.Name()), nil
}
// 3. Not a package-level object.
// Reject obviously non-viable cases.
switch obj := obj.(type) {
case *types.TypeName:
if _, ok := types.Unalias(obj.Type()).(*types.TypeParam); !ok {
// With the exception of type parameters, only package-level type names
// have a path.
return "", fmt.Errorf("no path for %v", obj)
}
case *types.Const, // Only package-level constants have a path.
*types.Label, // Labels are function-local.
*types.PkgName: // PkgNames are file-local.
return "", fmt.Errorf("no path for %v", obj)
case *types.Var:
// Could be:
// - a field (obj.IsField())
// - a func parameter or result
// - a local var.
// Sadly there is no way to distinguish
// a param/result from a local
// so we must proceed to the find.
case *types.Func:
// A func, if not package-level, must be a method.
if recv := obj.Type().(*types.Signature).Recv(); recv == nil {
return "", fmt.Errorf("func is not a method: %v", obj)
}
if path, ok := enc.concreteMethod(obj); ok {
// Fast path for concrete methods that avoids looping over scope.
return path, nil
}
default:
panic(obj)
}
// 4. Search the API for the path to the var (field/param/result) or method.
// First inspect package-level named types.
// In the presence of path aliases, these give
// the best paths because non-types may
// refer to types, but not the reverse.
empty := make([]byte, 0, 48) // initial space
objs := enc.scopeObjects(scope)
for _, o := range objs {
tname, ok := o.(*types.TypeName)
if !ok {
continue // handle non-types in second pass
}
path := append(empty, o.Name()...)
path = append(path, opType)
T := o.Type()
if alias, ok := T.(*types.Alias); ok {
if r := findTypeParam(obj, aliases.TypeParams(alias), path, opTypeParam); r != nil {
return Path(r), nil
}
if r := find(obj, aliases.Rhs(alias), append(path, opRhs)); r != nil {
return Path(r), nil
}
} else if tname.IsAlias() {
// legacy alias
if r := find(obj, T, path); r != nil {
return Path(r), nil
}
} else if named, ok := T.(*types.Named); ok {
// defined (named) type
if r := findTypeParam(obj, named.TypeParams(), path, opTypeParam); r != nil {
return Path(r), nil
}
if r := find(obj, named.Underlying(), append(path, opUnderlying)); r != nil {
return Path(r), nil
}
}
}
// Then inspect everything else:
// non-types, and declared methods of defined types.
for _, o := range objs {
path := append(empty, o.Name()...)
if _, ok := o.(*types.TypeName); !ok {
if o.Exported() {
// exported non-type (const, var, func)
if r := find(obj, o.Type(), append(path, opType)); r != nil {
return Path(r), nil
}
}
continue
}
// Inspect declared methods of defined types.
if T, ok := types.Unalias(o.Type()).(*types.Named); ok {
path = append(path, opType)
// The method index here is always with respect
// to the underlying go/types data structures,
// which ultimately derives from source order
// and must be preserved by export data.
for i := 0; i < T.NumMethods(); i++ {
m := T.Method(i)
path2 := appendOpArg(path, opMethod, i)
if m == obj {
return Path(path2), nil // found declared method
}
if r := find(obj, m.Type(), append(path2, opType)); r != nil {
return Path(r), nil
}
}
}
}
return "", fmt.Errorf("can't find path for %v in %s", obj, pkg.Path())
}
func appendOpArg(path []byte, op byte, arg int) []byte {
path = append(path, op)
path = strconv.AppendInt(path, int64(arg), 10)
return path
}
// concreteMethod returns the path for meth, which must have a non-nil receiver.
// The second return value indicates success and may be false if the method is
// an interface method or if it is an instantiated method.
//
// This function is just an optimization that avoids the general scope walking
// approach. You are expected to fall back to the general approach if this
// function fails.
func (enc *Encoder) concreteMethod(meth *types.Func) (Path, bool) {
// Concrete methods can only be declared on package-scoped named types. For
// that reason we can skip the expensive walk over the package scope: the
// path will always be package -> named type -> method. We can trivially get
// the type name from the receiver, and only have to look over the type's
// methods to find the method index.
//
// Methods on generic types require special consideration, however. Consider
// the following package:
//
// L1: type S[T any] struct{}
// L2: func (recv S[A]) Foo() { recv.Bar() }
// L3: func (recv S[B]) Bar() { }
// L4: type Alias = S[int]
// L5: func _[T any]() { var s S[int]; s.Foo() }
//
// The receivers of methods on generic types are instantiations. L2 and L3
// instantiate S with the type-parameters A and B, which are scoped to the
// respective methods. L4 and L5 each instantiate S with int. Each of these
// instantiations has its own method set, full of methods (and thus objects)
// with receivers whose types are the respective instantiations. In other
// words, we have
//
// S[A].Foo, S[A].Bar
// S[B].Foo, S[B].Bar
// S[int].Foo, S[int].Bar
//
// We may thus be trying to produce object paths for any of these objects.
//
// S[A].Foo and S[B].Bar are the origin methods, and their paths are S.Foo
// and S.Bar, which are the paths that this function naturally produces.
//
// S[A].Bar, S[B].Foo, and both methods on S[int] are instantiations that
// don't correspond to the origin methods. For S[int], this is significant.
// The most precise object path for S[int].Foo, for example, is Alias.Foo,
// not S.Foo. Our function, however, would produce S.Foo, which would
// resolve to a different object.
//
// For S[A].Bar and S[B].Foo it could be argued that S.Bar and S.Foo are
// still the correct paths, since only the origin methods have meaningful
// paths. But this is likely only true for trivial cases and has edge cases.
// Since this function is only an optimization, we err on the side of giving
// up, deferring to the slower but definitely correct algorithm. Most users
// of objectpath will only be giving us origin methods, anyway, as referring
// to instantiated methods is usually not useful.
if meth.Origin() != meth {
return "", false
}
_, named := typesinternal.ReceiverNamed(meth.Type().(*types.Signature).Recv())
if named == nil {
return "", false
}
if types.IsInterface(named) {
// Named interfaces don't have to be package-scoped
//
// TODO(dominikh): opt: if scope.Lookup(name) == named, then we can apply this optimization to interface
// methods, too, I think.
return "", false
}
// Preallocate space for the name, opType, opMethod, and some digits.
name := named.Obj().Name()
path := make([]byte, 0, len(name)+8)
path = append(path, name...)
path = append(path, opType)
// Method indices are w.r.t. the go/types data structures,
// ultimately deriving from source order,
// which is preserved by export data.
for i := 0; i < named.NumMethods(); i++ {
if named.Method(i) == meth {
path = appendOpArg(path, opMethod, i)
return Path(path), true
}
}
// Due to golang/go#59944, go/types fails to associate the receiver with
// certain methods on cgo types.
//
// TODO(rfindley): replace this panic once golang/go#59944 is fixed in all Go
// versions gopls supports.
return "", false
// panic(fmt.Sprintf("couldn't find method %s on type %s; methods: %#v", meth, named, enc.namedMethods(named)))
}
// find finds obj within type T, returning the path to it, or nil if not found.
//
// The seen map is used to short circuit cycles through type parameters. If
// nil, it will be allocated as necessary.
//
// The seenMethods map is used internally to short circuit cycles through
// interface methods, such as occur in the following example:
//
// type I interface { f() interface{I} }
//
// See golang/go#68046 for details.
func find(obj types.Object, T types.Type, path []byte) []byte {
return (&finder{obj: obj}).find(T, path)
}
// finder closes over search state for a call to find.
type finder struct {
obj types.Object // the sought object
seenTParamNames map[*types.TypeName]bool // for cycle breaking through type parameters
seenMethods map[*types.Func]bool // for cycle breaking through recursive interfaces
}
func (f *finder) find(T types.Type, path []byte) []byte {
switch T := T.(type) {
case *types.Alias:
return f.find(types.Unalias(T), path)
case *types.Basic, *types.Named:
// Named types belonging to pkg were handled already,
// so T must belong to another package. No path.
return nil
case *types.Pointer:
return f.find(T.Elem(), append(path, opElem))
case *types.Slice:
return f.find(T.Elem(), append(path, opElem))
case *types.Array:
return f.find(T.Elem(), append(path, opElem))
case *types.Chan:
return f.find(T.Elem(), append(path, opElem))
case *types.Map:
if r := f.find(T.Key(), append(path, opKey)); r != nil {
return r
}
return f.find(T.Elem(), append(path, opElem))
case *types.Signature:
if r := f.findTypeParam(T.RecvTypeParams(), path, opRecvTypeParam); r != nil {
return r
}
if r := f.findTypeParam(T.TypeParams(), path, opTypeParam); r != nil {
return r
}
if r := f.find(T.Params(), append(path, opParams)); r != nil {
return r
}
return f.find(T.Results(), append(path, opResults))
case *types.Struct:
for i := 0; i < T.NumFields(); i++ {
fld := T.Field(i)
path2 := appendOpArg(path, opField, i)
if fld == f.obj {
return path2 // found field var
}
if r := f.find(fld.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
case *types.Tuple:
for i := 0; i < T.Len(); i++ {
v := T.At(i)
path2 := appendOpArg(path, opAt, i)
if v == f.obj {
return path2 // found param/result var
}
if r := f.find(v.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
case *types.Interface:
for i := 0; i < T.NumMethods(); i++ {
m := T.Method(i)
if f.seenMethods[m] {
return nil
}
path2 := appendOpArg(path, opMethod, i)
if m == f.obj {
return path2 // found interface method
}
if f.seenMethods == nil {
f.seenMethods = make(map[*types.Func]bool)
}
f.seenMethods[m] = true
if r := f.find(m.Type(), append(path2, opType)); r != nil {
return r
}
}
return nil
case *types.TypeParam:
name := T.Obj()
if f.seenTParamNames[name] {
return nil
}
if name == f.obj {
return append(path, opObj)
}
if f.seenTParamNames == nil {
f.seenTParamNames = make(map[*types.TypeName]bool)
}
f.seenTParamNames[name] = true
if r := f.find(T.Constraint(), append(path, opConstraint)); r != nil {
return r
}
return nil
}
panic(T)
}
func findTypeParam(obj types.Object, list *types.TypeParamList, path []byte, op byte) []byte {
return (&finder{obj: obj}).findTypeParam(list, path, op)
}
func (f *finder) findTypeParam(list *types.TypeParamList, path []byte, op byte) []byte {
for i := 0; i < list.Len(); i++ {
tparam := list.At(i)
path2 := appendOpArg(path, op, i)
if r := f.find(tparam, path2); r != nil {
return r
}
}
return nil
}
// Object returns the object denoted by path p within the package pkg.
func Object(pkg *types.Package, p Path) (types.Object, error) {
pathstr := string(p)
if pathstr == "" {
return nil, fmt.Errorf("empty path")
}
var pkgobj, suffix string
if dot := strings.IndexByte(pathstr, opType); dot < 0 {
pkgobj = pathstr
} else {
pkgobj = pathstr[:dot]
suffix = pathstr[dot:] // suffix starts with "."
}
obj := pkg.Scope().Lookup(pkgobj)
if obj == nil {
return nil, fmt.Errorf("package %s does not contain %q", pkg.Path(), pkgobj)
}
// abstraction of *types.{Pointer,Slice,Array,Chan,Map}
type hasElem interface {
Elem() types.Type
}
// abstraction of *types.{Named,Signature}
type hasTypeParams interface {
TypeParams() *types.TypeParamList
}
// abstraction of *types.{Named,TypeParam}
type hasObj interface {
Obj() *types.TypeName
}
// The loop state is the pair (t, obj),
// exactly one of which is non-nil, initially obj.
// All suffixes start with '.' (the only object->type operation),
// followed by optional type->type operations,
// then a type->object operation.
// The cycle then repeats.
var t types.Type
for suffix != "" {
code := suffix[0]
suffix = suffix[1:]
// Codes [AFMTr] have an integer operand.
var index int
switch code {
case opAt, opField, opMethod, opTypeParam, opRecvTypeParam:
rest := strings.TrimLeft(suffix, "0123456789")
numerals := suffix[:len(suffix)-len(rest)]
suffix = rest
i, err := strconv.Atoi(numerals)
if err != nil {
return nil, fmt.Errorf("invalid path: bad numeric operand %q for code %q", numerals, code)
}
index = int(i)
case opObj:
// no operand
default:
// The suffix must end with a type->object operation.
if suffix == "" {
return nil, fmt.Errorf("invalid path: ends with %q, want [AFMO]", code)
}
}
if code == opType {
if t != nil {
return nil, fmt.Errorf("invalid path: unexpected %q in type context", opType)
}
t = obj.Type()
obj = nil
continue
}
if t == nil {
return nil, fmt.Errorf("invalid path: code %q in object context", code)
}
// Inv: t != nil, obj == nil
t = types.Unalias(t)
switch code {
case opElem:
hasElem, ok := t.(hasElem) // Pointer, Slice, Array, Chan, Map
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want pointer, slice, array, chan or map)", code, t, t)
}
t = hasElem.Elem()
case opKey:
mapType, ok := t.(*types.Map)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want map)", code, t, t)
}
t = mapType.Key()
case opParams:
sig, ok := t.(*types.Signature)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
}
t = sig.Params()
case opResults:
sig, ok := t.(*types.Signature)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
}
t = sig.Results()
case opUnderlying:
named, ok := t.(*types.Named)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named)", code, t, t)
}
t = named.Underlying()
case opRhs:
if alias, ok := t.(*types.Alias); ok {
t = aliases.Rhs(alias)
} else if false && aliases.Enabled() {
// The Enabled check is too expensive, so for now we
// simply assume that aliases are not enabled.
// TODO(adonovan): replace with "if true {" when go1.24 is assured.
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want alias)", code, t, t)
}
case opTypeParam:
hasTypeParams, ok := t.(hasTypeParams) // Named, Signature
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or signature)", code, t, t)
}
tparams := hasTypeParams.TypeParams()
if n := tparams.Len(); index >= n {
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
}
t = tparams.At(index)
case opRecvTypeParam:
sig, ok := t.(*types.Signature) // Signature
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want signature)", code, t, t)
}
rtparams := sig.RecvTypeParams()
if n := rtparams.Len(); index >= n {
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
}
t = rtparams.At(index)
case opConstraint:
tparam, ok := t.(*types.TypeParam)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want type parameter)", code, t, t)
}
t = tparam.Constraint()
case opAt:
tuple, ok := t.(*types.Tuple)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want tuple)", code, t, t)
}
if n := tuple.Len(); index >= n {
return nil, fmt.Errorf("tuple index %d out of range [0-%d)", index, n)
}
obj = tuple.At(index)
t = nil
case opField:
structType, ok := t.(*types.Struct)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want struct)", code, t, t)
}
if n := structType.NumFields(); index >= n {
return nil, fmt.Errorf("field index %d out of range [0-%d)", index, n)
}
obj = structType.Field(index)
t = nil
case opMethod:
switch t := t.(type) {
case *types.Interface:
if index >= t.NumMethods() {
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
}
obj = t.Method(index) // Id-ordered
case *types.Named:
if index >= t.NumMethods() {
return nil, fmt.Errorf("method index %d out of range [0-%d)", index, t.NumMethods())
}
obj = t.Method(index)
default:
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want interface or named)", code, t, t)
}
t = nil
case opObj:
hasObj, ok := t.(hasObj)
if !ok {
return nil, fmt.Errorf("cannot apply %q to %s (got %T, want named or type param)", code, t, t)
}
obj = hasObj.Obj()
t = nil
default:
return nil, fmt.Errorf("invalid path: unknown code %q", code)
}
}
if obj == nil {
panic(p) // path does not end in an object-valued operator
}
if obj.Pkg() != pkg {
return nil, fmt.Errorf("path denotes %s, which belongs to a different package", obj)
}
return obj, nil // success
}
// scopeObjects is a memoization of scope objects.
// Callers must not modify the result.
func (enc *Encoder) scopeObjects(scope *types.Scope) []types.Object {
m := enc.scopeMemo
if m == nil {
m = make(map[*types.Scope][]types.Object)
enc.scopeMemo = m
}
objs, ok := m[scope]
if !ok {
names := scope.Names() // allocates and sorts
objs = make([]types.Object, len(names))
for i, name := range names {
objs[i] = scope.Lookup(name)
}
m[scope] = objs
}
return objs
}
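
A round-trip sketch (not part of the vendored file): encode the path of a struct field in one type-checked copy of a package, then resolve the same path in an independently type-checked copy. The inline source and the path "example.com/p" are invented.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/objectpath"
)

// check type-checks a single-file package from source, panicking on error.
func check(src string) *types.Package {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	pkg, err := new(types.Config).Check("example.com/p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	return pkg
}

func main() {
	const src = `package p; type T struct{ X int }`

	pkg1 := check(src)
	x := pkg1.Scope().Lookup("T").Type().Underlying().(*types.Struct).Field(0)

	path, err := objectpath.For(x)
	if err != nil {
		panic(err)
	}
	fmt.Println(path) // T.UF0

	// The same path names the equivalent object in a distinct copy of the package.
	pkg2 := check(src)
	obj, err := objectpath.Object(pkg2, path)
	if err != nil {
		panic(err)
	}
	fmt.Println(obj) // e.g. field X int
}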


@ -1,68 +0,0 @@
// Copyright 2018 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
import (
"go/ast"
"go/types"
"golang.org/x/tools/internal/typeparams"
)
// Callee returns the named target of a function call, if any:
// a function, method, builtin, or variable.
//
// Functions and methods may potentially have type parameters.
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
fun := ast.Unparen(call.Fun)
// Look through type instantiation if necessary.
isInstance := false
switch fun.(type) {
case *ast.IndexExpr, *ast.IndexListExpr:
// When extracting the callee from an *IndexExpr, we need to check that
// it is a *types.Func and not a *types.Var.
// Example: Don't match a slice m within the expression `m[0]()`.
isInstance = true
fun, _, _, _ = typeparams.UnpackIndexExpr(fun)
}
var obj types.Object
switch fun := fun.(type) {
case *ast.Ident:
obj = info.Uses[fun] // type, var, builtin, or declared func
case *ast.SelectorExpr:
if sel, ok := info.Selections[fun]; ok {
obj = sel.Obj() // method or field
} else {
obj = info.Uses[fun.Sel] // qualified identifier?
}
}
if _, ok := obj.(*types.TypeName); ok {
return nil // T(x) is a conversion, not a call
}
// A Func is required to match instantiations.
if _, ok := obj.(*types.Func); isInstance && !ok {
return nil // Was not a Func.
}
return obj
}
// StaticCallee returns the target (function or method) of a static function
// call, if any. It returns nil for calls to builtins.
//
// Note: for calls of instantiated functions and methods, StaticCallee returns
// the corresponding generic function or method on the generic type.
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
return f
}
return nil
}
func interfaceMethod(f *types.Func) bool {
recv := f.Type().(*types.Signature).Recv()
return recv != nil && types.IsInterface(recv.Type())
}
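
A small sketch (not part of the vendored file) of Callee/StaticCallee on a hand-written package: the declared function and method are resolved, while the call to the builtin println yields no static callee. The inline source is invented.

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	const src = `package p
type T struct{}
func (T) M() {}
func g()     {}
func f() {
	g()
	var t T
	t.M()
	println("hi")
}`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := &types.Info{
		Uses:       make(map[*ast.Ident]types.Object),
		Selections: make(map[*ast.SelectorExpr]*types.Selection),
	}
	if _, err := new(types.Config).Check("example.com/p", fset, []*ast.File{f}, info); err != nil {
		panic(err)
	}
	ast.Inspect(f, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			if fn := typeutil.StaticCallee(info, call); fn != nil {
				fmt.Println(fn.FullName()) // example.com/p.g, then (example.com/p.T).M
			}
		}
		return true
	})
}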


@ -1,30 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
import "go/types"
// Dependencies returns all dependencies of the specified packages.
//
// Dependent packages appear in topological order: if package P imports
// package Q, Q appears earlier than P in the result.
// The algorithm follows import statements in the order they
// appear in the source code, so the result is a total order.
func Dependencies(pkgs ...*types.Package) []*types.Package {
var result []*types.Package
seen := make(map[*types.Package]bool)
var visit func(pkgs []*types.Package)
visit = func(pkgs []*types.Package) {
for _, p := range pkgs {
if !seen[p] {
seen[p] = true
visit(p.Imports())
result = append(result, p)
}
}
}
visit(pkgs)
return result
}
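
A compact sketch (not part of the vendored file) of the ordering guarantee: packages built by hand with types.NewPackage, purely to show that dependencies come out before their importers.

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	a := types.NewPackage("example.com/a", "a")
	b := types.NewPackage("example.com/b", "b")
	c := types.NewPackage("example.com/c", "c")
	b.SetImports([]*types.Package{a})
	c.SetImports([]*types.Package{a, b})

	for _, p := range typeutil.Dependencies(c) {
		fmt.Println(p.Path()) // example.com/a, example.com/b, example.com/c
	}
}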


@ -1,467 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package typeutil defines various utilities for types, such as [Map],
// a hash table that maps [types.Type] to any value.
package typeutil
import (
"bytes"
"fmt"
"go/types"
"hash/maphash"
"unsafe"
"golang.org/x/tools/internal/typeparams"
)
// Map is a hash-table-based mapping from types (types.Type) to
// arbitrary values. The concrete types that implement
// the Type interface are pointers. Since they are not canonicalized,
// == cannot be used to check for equivalence, and thus we cannot
// simply use a Go map.
//
// Just as with map[K]V, a nil *Map is a valid empty map.
//
// Read-only map operations ([Map.At], [Map.Len], and so on) may
// safely be called concurrently.
//
// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420
// and 69559, if the latter proposals for a generic hash-map type and
// a types.Hash function are accepted.
type Map struct {
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
length int // number of map entries
}
// entry is an entry (key/value association) in a hash bucket.
type entry struct {
key types.Type
value any
}
// SetHasher has no effect.
//
// It is a relic of an optimization that is no longer profitable. Do
// not use [Hasher], [MakeHasher], or [SetHasher] in new code.
func (m *Map) SetHasher(Hasher) {}
// Delete removes the entry with the given key, if any.
// It returns true if the entry was found.
func (m *Map) Delete(key types.Type) bool {
if m != nil && m.table != nil {
hash := hash(key)
bucket := m.table[hash]
for i, e := range bucket {
if e.key != nil && types.Identical(key, e.key) {
// We can't compact the bucket as it
// would disturb iterators.
bucket[i] = entry{}
m.length--
return true
}
}
}
return false
}
// At returns the map entry for the given key.
// The result is nil if the entry is not present.
func (m *Map) At(key types.Type) any {
if m != nil && m.table != nil {
for _, e := range m.table[hash(key)] {
if e.key != nil && types.Identical(key, e.key) {
return e.value
}
}
}
return nil
}
// Set sets the map entry for key to val,
// and returns the previous entry, if any.
func (m *Map) Set(key types.Type, value any) (prev any) {
if m.table != nil {
hash := hash(key)
bucket := m.table[hash]
var hole *entry
for i, e := range bucket {
if e.key == nil {
hole = &bucket[i]
} else if types.Identical(key, e.key) {
prev = e.value
bucket[i].value = value
return
}
}
if hole != nil {
*hole = entry{key, value} // overwrite deleted entry
} else {
m.table[hash] = append(bucket, entry{key, value})
}
} else {
hash := hash(key)
m.table = map[uint32][]entry{hash: {entry{key, value}}}
}
m.length++
return
}
// Len returns the number of map entries.
func (m *Map) Len() int {
if m != nil {
return m.length
}
return 0
}
// Iterate calls function f on each entry in the map in unspecified order.
//
// If f should mutate the map, Iterate provides the same guarantees as
// Go maps: if f deletes a map entry that Iterate has not yet reached,
// f will not be invoked for it, but if f inserts a map entry that
// Iterate has not yet reached, whether or not f will be invoked for
// it is unspecified.
func (m *Map) Iterate(f func(key types.Type, value any)) {
if m != nil {
for _, bucket := range m.table {
for _, e := range bucket {
if e.key != nil {
f(e.key, e.value)
}
}
}
}
}
// Keys returns a new slice containing the set of map keys.
// The order is unspecified.
func (m *Map) Keys() []types.Type {
keys := make([]types.Type, 0, m.Len())
m.Iterate(func(key types.Type, _ any) {
keys = append(keys, key)
})
return keys
}
func (m *Map) toString(values bool) string {
if m == nil {
return "{}"
}
var buf bytes.Buffer
fmt.Fprint(&buf, "{")
sep := ""
m.Iterate(func(key types.Type, value any) {
fmt.Fprint(&buf, sep)
sep = ", "
fmt.Fprint(&buf, key)
if values {
fmt.Fprintf(&buf, ": %q", value)
}
})
fmt.Fprint(&buf, "}")
return buf.String()
}
// String returns a string representation of the map's entries.
// Values are printed using fmt.Sprintf("%v", v).
// Order is unspecified.
func (m *Map) String() string {
return m.toString(true)
}
// KeysString returns a string representation of the map's key set.
// Order is unspecified.
func (m *Map) KeysString() string {
return m.toString(false)
}
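
As an aside, a minimal sketch (not part of the vendored file) of the Map API shown above: two separately constructed but structurally identical signatures land on the same entry, which a plain Go map keyed by types.Type would not do.

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	// Build two distinct func(string) int signature values.
	mk := func() *types.Signature {
		params := types.NewTuple(types.NewVar(0, nil, "", types.Typ[types.String]))
		results := types.NewTuple(types.NewVar(0, nil, "", types.Typ[types.Int]))
		return types.NewSignatureType(nil, nil, nil, params, results, false)
	}
	t1, t2 := mk(), mk()

	var m typeutil.Map
	m.Set(t1, "hello")
	fmt.Println(t1 == t2) // false: distinct pointers
	fmt.Println(m.At(t2)) // hello: types.Identical(t1, t2) holds
	fmt.Println(m.Len())  // 1
}
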
// -- Hasher --
// hash returns the hash of type t.
// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted.
func hash(t types.Type) uint32 {
return theHasher.Hash(t)
}
// A Hasher provides a [Hasher.Hash] method to map a type to its hash value.
// Hashers are stateless, and all are equivalent.
type Hasher struct{}
var theHasher Hasher
// MakeHasher returns Hasher{}.
// Hashers are stateless; all are equivalent.
func MakeHasher() Hasher { return theHasher }
// Hash computes a hash value for the given type t such that
// Identical(t, t') => Hash(t) == Hash(t').
func (h Hasher) Hash(t types.Type) uint32 {
return hasher{inGenericSig: false}.hash(t)
}
// hasher holds the state of a single Hash traversal: whether we are
// inside the signature of a generic function; this is used to
// optimize [hasher.hashTypeParam].
type hasher struct{ inGenericSig bool }
// hashString computes the Fowler–Noll–Vo hash of s.
func hashString(s string) uint32 {
var h uint32
for i := 0; i < len(s); i++ {
h ^= uint32(s[i])
h *= 16777619
}
return h
}
// hash computes the hash of t.
func (h hasher) hash(t types.Type) uint32 {
// See Identical for rationale.
switch t := t.(type) {
case *types.Basic:
return uint32(t.Kind())
case *types.Alias:
return h.hash(types.Unalias(t))
case *types.Array:
return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem())
case *types.Slice:
return 9049 + 2*h.hash(t.Elem())
case *types.Struct:
var hash uint32 = 9059
for i, n := 0, t.NumFields(); i < n; i++ {
f := t.Field(i)
if f.Anonymous() {
hash += 8861
}
hash += hashString(t.Tag(i))
hash += hashString(f.Name()) // (ignore f.Pkg)
hash += h.hash(f.Type())
}
return hash
case *types.Pointer:
return 9067 + 2*h.hash(t.Elem())
case *types.Signature:
var hash uint32 = 9091
if t.Variadic() {
hash *= 8863
}
tparams := t.TypeParams()
for i := range tparams.Len() {
h.inGenericSig = true
tparam := tparams.At(i)
hash += 7 * h.hash(tparam.Constraint())
}
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
case *types.Union:
return h.hashUnion(t)
case *types.Interface:
// Interfaces are identical if they have the same set of methods, with
// identical names and types, and they have the same set of type
// restrictions. See go/types.identical for more details.
var hash uint32 = 9103
// Hash methods.
for i, n := 0, t.NumMethods(); i < n; i++ {
// Method order is not significant.
// Ignore m.Pkg().
m := t.Method(i)
// Use shallow hash on method signature to
// avoid anonymous interface cycles.
hash += 3*hashString(m.Name()) + 5*h.shallowHash(m.Type())
}
// Hash type restrictions.
terms, err := typeparams.InterfaceTermSet(t)
// If err != nil, t has invalid type restrictions.
if err == nil {
hash += h.hashTermSet(terms)
}
return hash
case *types.Map:
return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem())
case *types.Chan:
return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem())
case *types.Named:
hash := h.hashTypeName(t.Obj())
targs := t.TypeArgs()
for i := 0; i < targs.Len(); i++ {
targ := targs.At(i)
hash += 2 * h.hash(targ)
}
return hash
case *types.TypeParam:
return h.hashTypeParam(t)
case *types.Tuple:
return h.hashTuple(t)
}
panic(fmt.Sprintf("%T: %v", t, t))
}
func (h hasher) hashTuple(tuple *types.Tuple) uint32 {
// See go/types.identicalTypes for rationale.
n := tuple.Len()
hash := 9137 + 2*uint32(n)
for i := range n {
hash += 3 * h.hash(tuple.At(i).Type())
}
return hash
}
func (h hasher) hashUnion(t *types.Union) uint32 {
// Hash type restrictions.
terms, err := typeparams.UnionTermSet(t)
// If err != nil, t has invalid type restrictions. Fall back on a non-zero
// hash.
if err != nil {
return 9151
}
return h.hashTermSet(terms)
}
func (h hasher) hashTermSet(terms []*types.Term) uint32 {
hash := 9157 + 2*uint32(len(terms))
for _, term := range terms {
// term order is not significant.
termHash := h.hash(term.Type())
if term.Tilde() {
termHash *= 9161
}
hash += 3 * termHash
}
return hash
}
// hashTypeParam returns the hash of a type parameter.
func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
// Within the signature of a generic function, TypeParams are
// identical if they have the same index and constraint, so we
// hash them based on index.
//
// When we are outside a generic function, free TypeParams are
// identical iff they are the same object, so we can use a
// more discriminating hash consistent with object identity.
// This optimization saves [Map] about 4% when hashing all the
// types.Info.Types in the forward closure of net/http.
if !h.inGenericSig {
// Optimization: outside a generic function signature,
// use a more discriminating hash consistent with object identity.
return h.hashTypeName(t.Obj())
}
return 9173 + 3*uint32(t.Index())
}
var theSeed = maphash.MakeSeed()
// hashTypeName hashes the pointer of tname.
func (hasher) hashTypeName(tname *types.TypeName) uint32 {
// Since types.Identical uses == to compare TypeNames,
// the Hash function uses maphash.Comparable.
// TODO(adonovan): or will, when it becomes available in go1.24.
// In the meantime we use the pointer's numeric value.
//
// hash := maphash.Comparable(theSeed, tname)
//
// (Another approach would be to hash the name and package
// path, and whether or not it is a package-level typename. It
// is rare for a package to define multiple local types with
// the same name.)
hash := uintptr(unsafe.Pointer(tname))
return uint32(hash ^ (hash >> 32))
}
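Since this vendor update targets Go 1.24, the maphash.Comparable mentioned in the TODO above is now available in the standard library. The following is a hedged sketch of what that comment describes, not the vendored implementation; the function and variable names are illustrative.
package example

import (
    "go/types"
    "hash/maphash"
)

var seed = maphash.MakeSeed()

// hashTypeNameComparable folds maphash.Comparable's 64-bit hash of the
// *TypeName pointer down to 32 bits, mirroring the fold used above.
func hashTypeNameComparable(tname *types.TypeName) uint32 {
    h := maphash.Comparable(seed, tname)
    return uint32(h ^ (h >> 32))
}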
// shallowHash computes a hash of t without looking at any of its
// element Types, to avoid potential anonymous cycles in the types of
// interface methods.
//
// When an unnamed non-empty interface type appears anywhere among the
// arguments or results of an interface method, there is a potential
// for endless recursion. Consider:
//
// type X interface { m() []*interface { X } }
//
// The problem is that the Methods of the interface in m's result type
// include m itself; there is no mention of the named type X that
// might help us break the cycle.
// (See comment in go/types.identical, case *Interface, for more.)
func (h hasher) shallowHash(t types.Type) uint32 {
// t is the type of an interface method (Signature),
// its params or results (Tuples), or their immediate
// elements (mostly Slice, Pointer, Basic, Named),
// so there's no need to optimize anything else.
switch t := t.(type) {
case *types.Alias:
return h.shallowHash(types.Unalias(t))
case *types.Signature:
var hash uint32 = 604171
if t.Variadic() {
hash *= 971767
}
// The Signature/Tuple recursion is always finite
// and invariably shallow.
return hash + 1062599*h.shallowHash(t.Params()) + 1282529*h.shallowHash(t.Results())
case *types.Tuple:
n := t.Len()
hash := 9137 + 2*uint32(n)
for i := range n {
hash += 53471161 * h.shallowHash(t.At(i).Type())
}
return hash
case *types.Basic:
return 45212177 * uint32(t.Kind())
case *types.Array:
return 1524181 + 2*uint32(t.Len())
case *types.Slice:
return 2690201
case *types.Struct:
return 3326489
case *types.Pointer:
return 4393139
case *types.Union:
return 562448657
case *types.Interface:
return 2124679 // no recursion here
case *types.Map:
return 9109
case *types.Chan:
return 9127
case *types.Named:
return h.hashTypeName(t.Obj())
case *types.TypeParam:
return h.hashTypeParam(t)
}
panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
}

View File

@ -1,71 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file implements a cache of method sets.
package typeutil
import (
"go/types"
"sync"
)
// A MethodSetCache records the method set of each type T for which
// MethodSet(T) is called so that repeat queries are fast.
// The zero value is a ready-to-use cache instance.
type MethodSetCache struct {
mu sync.Mutex
named map[*types.Named]struct{ value, pointer *types.MethodSet } // method sets for named N and *N
others map[types.Type]*types.MethodSet // all other types
}
// MethodSet returns the method set of type T. It is thread-safe.
//
// If cache is nil, this function is equivalent to types.NewMethodSet(T).
// Utility functions can thus expose an optional *MethodSetCache
// parameter to clients that care about performance.
func (cache *MethodSetCache) MethodSet(T types.Type) *types.MethodSet {
if cache == nil {
return types.NewMethodSet(T)
}
cache.mu.Lock()
defer cache.mu.Unlock()
switch T := types.Unalias(T).(type) {
case *types.Named:
return cache.lookupNamed(T).value
case *types.Pointer:
if N, ok := types.Unalias(T.Elem()).(*types.Named); ok {
return cache.lookupNamed(N).pointer
}
}
// all other types
// (The map uses pointer equivalence, not type identity.)
mset := cache.others[T]
if mset == nil {
mset = types.NewMethodSet(T)
if cache.others == nil {
cache.others = make(map[types.Type]*types.MethodSet)
}
cache.others[T] = mset
}
return mset
}
func (cache *MethodSetCache) lookupNamed(named *types.Named) struct{ value, pointer *types.MethodSet } {
if cache.named == nil {
cache.named = make(map[*types.Named]struct{ value, pointer *types.MethodSet })
}
// Avoid recomputing mset(*T) for each distinct Pointer
// instance whose underlying type is a named type.
msets, ok := cache.named[named]
if !ok {
msets.value = types.NewMethodSet(named)
msets.pointer = types.NewMethodSet(types.NewPointer(named))
cache.named[named] = msets
}
return msets
}
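A hedged usage sketch (not part of the vendored file): because a nil *MethodSetCache falls back to types.NewMethodSet, utility functions can accept the cache as an optional parameter. numMethods below is a hypothetical helper.
package main

import (
    "fmt"
    "go/types"

    "golang.org/x/tools/go/types/typeutil"
)

// numMethods is a hypothetical helper: a nil cache is valid and simply
// recomputes the method set on every call.
func numMethods(T types.Type, cache *typeutil.MethodSetCache) int {
    return cache.MethodSet(T).Len()
}

func main() {
    var cache typeutil.MethodSetCache // the zero value is ready to use
    T := types.NewPointer(types.Typ[types.Int])
    fmt.Println(numMethods(T, &cache), numMethods(T, nil)) // 0 0
}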

View File

@ -1,53 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package typeutil
// This file defines utilities for user interfaces that display types.
import (
"go/types"
)
// IntuitiveMethodSet returns the intuitive method set of a type T,
// which is the set of methods you can call on an addressable value of
// that type.
//
// The result always contains MethodSet(T), and is exactly MethodSet(T)
// for interface types and for pointer-to-concrete types.
// For all other concrete types T, the result additionally
// contains each method belonging to *T if there is no identically
// named method on T itself.
//
// This corresponds to user intuition about method sets;
// this function is intended only for user interfaces.
//
// The order of the result is as for types.MethodSet(T).
func IntuitiveMethodSet(T types.Type, msets *MethodSetCache) []*types.Selection {
isPointerToConcrete := func(T types.Type) bool {
ptr, ok := types.Unalias(T).(*types.Pointer)
return ok && !types.IsInterface(ptr.Elem())
}
var result []*types.Selection
mset := msets.MethodSet(T)
if types.IsInterface(T) || isPointerToConcrete(T) {
for i, n := 0, mset.Len(); i < n; i++ {
result = append(result, mset.At(i))
}
} else {
// T is some other concrete type.
// Report methods of T and *T, preferring those of T.
pmset := msets.MethodSet(types.NewPointer(T))
for i, n := 0, pmset.Len(); i < n; i++ {
meth := pmset.At(i)
if m := mset.Lookup(meth.Obj().Pkg(), meth.Obj().Name()); m != nil {
meth = m
}
result = append(result, meth)
}
}
return result
}
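A hedged sketch (not part of the vendored file) of the difference described above: for a concrete type with a pointer-receiver method, IntuitiveMethodSet also reports the *T methods, while types.NewMethodSet(T) does not. The tiny package p and type T are illustrative.
package main

import (
    "fmt"
    "go/ast"
    "go/parser"
    "go/token"
    "go/types"

    "golang.org/x/tools/go/types/typeutil"
)

func main() {
    const src = `package p
type T struct{}
func (T) Value()    {}
func (*T) Pointer() {}
`
    fset := token.NewFileSet()
    f, _ := parser.ParseFile(fset, "p.go", src, 0) // errors ignored for brevity
    pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
    T := pkg.Scope().Lookup("T").Type()

    fmt.Println(types.NewMethodSet(T).Len()) // 1: only Value
    for _, sel := range typeutil.IntuitiveMethodSet(T, nil) {
        fmt.Println(sel.Obj().Name()) // Pointer, Value
    }
}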

View File

@ -1,38 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package aliases
import (
"go/token"
"go/types"
)
// Package aliases defines backward compatible shims
// for the types.Alias type representation added in 1.22.
// This defines placeholders for x/tools until 1.26.
// NewAlias creates a new TypeName in Package pkg that
// is an alias for the type rhs.
//
// The enabled parameter determines whether the resulting [TypeName]'s
// type is an [types.Alias]. Its value must be the result of a call to
// [Enabled], which computes the effective value of
// GODEBUG=gotypesalias=... by invoking the type checker. The Enabled
// function is expensive and should be called once per task (e.g.
// package import), not once per call to NewAlias.
//
// Precondition: enabled || len(tparams)==0.
// If materialized aliases are disabled, there must not be any type parameters.
func NewAlias(enabled bool, pos token.Pos, pkg *types.Package, name string, rhs types.Type, tparams []*types.TypeParam) *types.TypeName {
if enabled {
tname := types.NewTypeName(pos, pkg, name, nil)
SetTypeParams(types.NewAlias(tname, rhs), tparams)
return tname
}
if len(tparams) > 0 {
panic("cannot create an alias with type parameters when gotypesalias is not enabled")
}
return types.NewTypeName(pos, pkg, name, rhs)
}

View File

@ -1,80 +0,0 @@
// Copyright 2024 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package aliases
import (
"go/ast"
"go/parser"
"go/token"
"go/types"
)
// Rhs returns the type on the right-hand side of the alias declaration.
func Rhs(alias *types.Alias) types.Type {
if alias, ok := any(alias).(interface{ Rhs() types.Type }); ok {
return alias.Rhs() // go1.23+
}
// go1.22's Alias didn't have the Rhs method,
// so Unalias is the best we can do.
return types.Unalias(alias)
}
// TypeParams returns the type parameter list of the alias.
func TypeParams(alias *types.Alias) *types.TypeParamList {
if alias, ok := any(alias).(interface{ TypeParams() *types.TypeParamList }); ok {
return alias.TypeParams() // go1.23+
}
return nil
}
// SetTypeParams sets the type parameters of the alias type.
func SetTypeParams(alias *types.Alias, tparams []*types.TypeParam) {
if alias, ok := any(alias).(interface {
SetTypeParams(tparams []*types.TypeParam)
}); ok {
alias.SetTypeParams(tparams) // go1.23+
} else if len(tparams) > 0 {
panic("cannot set type parameters of an Alias type in go1.22")
}
}
// TypeArgs returns the type arguments used to instantiate the Alias type.
func TypeArgs(alias *types.Alias) *types.TypeList {
if alias, ok := any(alias).(interface{ TypeArgs() *types.TypeList }); ok {
return alias.TypeArgs() // go1.23+
}
return nil // empty (go1.22)
}
// Origin returns the generic Alias type of which alias is an instance.
// If alias is not an instance of a generic alias, Origin returns alias.
func Origin(alias *types.Alias) *types.Alias {
if alias, ok := any(alias).(interface{ Origin() *types.Alias }); ok {
return alias.Origin() // go1.23+
}
return alias // not an instance of a generic alias (go1.22)
}
// Enabled reports whether [NewAlias] should create [types.Alias] types.
//
// This function is expensive! Call it sparingly.
func Enabled() bool {
// The only reliable way to compute the answer is to invoke go/types.
// We don't parse the GODEBUG environment variable, because
// (a) it's tricky to do so in a manner that is consistent
// with the godebug package; in particular, a simple
// substring check is not good enough. The value is a
// rightmost-wins list of options. But more importantly:
// (b) it is impossible to detect changes to the effective
// setting caused by os.Setenv("GODEBUG"), as happens in
// many tests. Therefore any attempt to cache the result
// is just incorrect.
fset := token.NewFileSet()
f, _ := parser.ParseFile(fset, "a.go", "package p; type A = int", parser.SkipObjectResolution)
pkg, _ := new(types.Config).Check("p", fset, []*ast.File{f}, nil)
_, enabled := pkg.Scope().Lookup("A").Type().(*types.Alias)
return enabled
}

View File

@ -1,85 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package core provides support for event based telemetry.
package core
import (
"fmt"
"time"
"golang.org/x/tools/internal/event/label"
)
// Event holds the information about an event of note that occurred.
type Event struct {
at time.Time
// As events are often on the stack, storing the first few labels directly
// in the event can avoid any allocation at all for the very common case of
// simple events.
// The length needs to be large enough to cope with the majority of events
// but not so large as to cause undue stack pressure.
// A log message with two values will use 3 labels (one for each value and
// one for the message itself).
static [3]label.Label // inline storage for the first few labels
dynamic []label.Label // dynamically sized storage for remaining labels
}
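A hedged sketch of what the comment above means in practice. It assumes a caller inside the x/tools module, since these event packages are internal, and the key names are illustrative.
package example

import (
    "golang.org/x/tools/internal/event/core"
    "golang.org/x/tools/internal/event/keys"
    "golang.org/x/tools/internal/event/label"
)

var (
    msg    = keys.NewString("message", "a readable message")
    method = keys.NewString("method", "an HTTP method")
    status = keys.NewInt("status", "an HTTP status code")
)

// threeLabels builds a log-style event whose message plus two values fill
// exactly the three inline slots, so the dynamic slice stays nil.
func threeLabels() core.Event {
    return core.MakeEvent([3]label.Label{
        msg.Of("request handled"),
        method.Of("GET"),
        status.Of(200),
    }, nil)
}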
// eventLabelMap implements label.Map for the labels of an Event.
type eventLabelMap struct {
event Event
}
func (ev Event) At() time.Time { return ev.at }
func (ev Event) Format(f fmt.State, r rune) {
if !ev.at.IsZero() {
fmt.Fprint(f, ev.at.Format("2006/01/02 15:04:05 "))
}
for index := 0; ev.Valid(index); index++ {
if l := ev.Label(index); l.Valid() {
fmt.Fprintf(f, "\n\t%v", l)
}
}
}
func (ev Event) Valid(index int) bool {
return index >= 0 && index < len(ev.static)+len(ev.dynamic)
}
func (ev Event) Label(index int) label.Label {
if index < len(ev.static) {
return ev.static[index]
}
return ev.dynamic[index-len(ev.static)]
}
func (ev Event) Find(key label.Key) label.Label {
for _, l := range ev.static {
if l.Key() == key {
return l
}
}
for _, l := range ev.dynamic {
if l.Key() == key {
return l
}
}
return label.Label{}
}
func MakeEvent(static [3]label.Label, labels []label.Label) Event {
return Event{
static: static,
dynamic: labels,
}
}
// CloneEvent returns a copy of the event with the time adjusted to at.
func CloneEvent(ev Event, at time.Time) Event {
ev.at = at
return ev
}

View File

@ -1,70 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package core
import (
"context"
"sync/atomic"
"time"
"unsafe"
"golang.org/x/tools/internal/event/label"
)
// Exporter is a function that handles events.
// It may return a modified context and event.
type Exporter func(context.Context, Event, label.Map) context.Context
var (
exporter unsafe.Pointer
)
// SetExporter sets the global exporter function that handles all events.
// The exporter is called synchronously from the event call site, so it should
// return quickly so as not to hold up user code.
func SetExporter(e Exporter) {
p := unsafe.Pointer(&e)
if e == nil {
// &e is always valid, and so p is always valid, but for the early abort
// of Export to be efficient it needs to make the nil check on the
// pointer without having to dereference it, so we make the nil function
// also a nil pointer.
p = nil
}
atomic.StorePointer(&exporter, p)
}
// deliver is called to deliver an event to the supplied exporter.
// it will fill in the time.
func deliver(ctx context.Context, exporter Exporter, ev Event) context.Context {
// add the current time to the event
ev.at = time.Now()
// hand the event off to the current exporter
return exporter(ctx, ev, ev)
}
// Export is called to deliver an event to the global exporter if set.
func Export(ctx context.Context, ev Event) context.Context {
// get the global exporter and abort early if there is not one
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
if exporterPtr == nil {
return ctx
}
return deliver(ctx, *exporterPtr, ev)
}
// ExportPair is called to deliver a start event to the global exporter if set.
// It also returns a function that will deliver the end event to the same
// exporter.
// It will fill in the time.
func ExportPair(ctx context.Context, begin, end Event) (context.Context, func()) {
// get the global exporter and abort early if there is not one
exporterPtr := (*Exporter)(atomic.LoadPointer(&exporter))
if exporterPtr == nil {
return ctx, func() {}
}
ctx = deliver(ctx, *exporterPtr, begin)
return ctx, func() { deliver(ctx, *exporterPtr, end) }
}

View File

@ -1,77 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package core
import (
"context"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
)
// Log1 takes a message and one label and delivers a log event to the exporter.
// It is a customized version of Print that is faster and does no allocation.
func Log1(ctx context.Context, message string, t1 label.Label) {
Export(ctx, MakeEvent([3]label.Label{
keys.Msg.Of(message),
t1,
}, nil))
}
// Log2 takes a message and two labels and delivers a log event to the exporter.
// It is a customized version of Print that is faster and does no allocation.
func Log2(ctx context.Context, message string, t1 label.Label, t2 label.Label) {
Export(ctx, MakeEvent([3]label.Label{
keys.Msg.Of(message),
t1,
t2,
}, nil))
}
// Metric1 sends a label event to the exporter with the supplied labels.
func Metric1(ctx context.Context, t1 label.Label) context.Context {
return Export(ctx, MakeEvent([3]label.Label{
keys.Metric.New(),
t1,
}, nil))
}
// Metric2 sends a label event to the exporter with the supplied labels.
func Metric2(ctx context.Context, t1, t2 label.Label) context.Context {
return Export(ctx, MakeEvent([3]label.Label{
keys.Metric.New(),
t1,
t2,
}, nil))
}
// Start1 sends a span start event with the supplied label list to the exporter.
// It also returns a function that will end the span, which should normally be
// deferred.
func Start1(ctx context.Context, name string, t1 label.Label) (context.Context, func()) {
return ExportPair(ctx,
MakeEvent([3]label.Label{
keys.Start.Of(name),
t1,
}, nil),
MakeEvent([3]label.Label{
keys.End.New(),
}, nil))
}
// Start2 sends a span start event with the supplied label list to the exporter.
// It also returns a function that will end the span, which should normally be
// deferred.
func Start2(ctx context.Context, name string, t1, t2 label.Label) (context.Context, func()) {
return ExportPair(ctx,
MakeEvent([3]label.Label{
keys.Start.Of(name),
t1,
t2,
}, nil),
MakeEvent([3]label.Label{
keys.End.New(),
}, nil))
}

View File

@ -1,7 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package event provides a set of packages that cover the main
// concepts of telemetry in an implementation agnostic way.
package event

View File

@ -1,127 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package event
import (
"context"
"golang.org/x/tools/internal/event/core"
"golang.org/x/tools/internal/event/keys"
"golang.org/x/tools/internal/event/label"
)
// Exporter is a function that handles events.
// It may return a modified context and event.
type Exporter func(context.Context, core.Event, label.Map) context.Context
// SetExporter sets the global exporter function that handles all events.
// The exporter is called synchronously from the event call site, so it should
// return quickly so as not to hold up user code.
func SetExporter(e Exporter) {
core.SetExporter(core.Exporter(e))
}
// Log takes a message and a label list and combines them into a single event
// before delivering them to the exporter.
func Log(ctx context.Context, message string, labels ...label.Label) {
core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Msg.Of(message),
}, labels))
}
// IsLog returns true if the event was built by the Log function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsLog(ev core.Event) bool {
return ev.Label(0).Key() == keys.Msg
}
// Error takes a message and a label list and combines them into a single event
// before delivering them to the exporter. It captures the error in the
// delivered event.
func Error(ctx context.Context, message string, err error, labels ...label.Label) {
core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Msg.Of(message),
keys.Err.Of(err),
}, labels))
}
// IsError returns true if the event was built by the Error function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsError(ev core.Event) bool {
return ev.Label(0).Key() == keys.Msg &&
ev.Label(1).Key() == keys.Err
}
// Metric sends a label event to the exporter with the supplied labels.
func Metric(ctx context.Context, labels ...label.Label) {
core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Metric.New(),
}, labels))
}
// IsMetric returns true if the event was built by the Metric function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsMetric(ev core.Event) bool {
return ev.Label(0).Key() == keys.Metric
}
// Label sends a label event to the exporter with the supplied labels.
func Label(ctx context.Context, labels ...label.Label) context.Context {
return core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Label.New(),
}, labels))
}
// IsLabel returns true if the event was built by the Label function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsLabel(ev core.Event) bool {
return ev.Label(0).Key() == keys.Label
}
// Start sends a span start event with the supplied label list to the exporter.
// It also returns a function that will end the span, which should normally be
// deferred.
func Start(ctx context.Context, name string, labels ...label.Label) (context.Context, func()) {
return core.ExportPair(ctx,
core.MakeEvent([3]label.Label{
keys.Start.Of(name),
}, labels),
core.MakeEvent([3]label.Label{
keys.End.New(),
}, nil))
}
// IsStart returns true if the event was built by the Start function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsStart(ev core.Event) bool {
return ev.Label(0).Key() == keys.Start
}
// IsEnd returns true if the event was built by the End function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsEnd(ev core.Event) bool {
return ev.Label(0).Key() == keys.End
}
// Detach returns a context without an associated span.
// This allows the creation of spans that are not children of the current span.
func Detach(ctx context.Context) context.Context {
return core.Export(ctx, core.MakeEvent([3]label.Label{
keys.Detach.New(),
}, nil))
}
// IsDetach returns true if the event was built by the Detach function.
// It is intended to be used in exporters to identify the semantics of the
// event when deciding what to do with it.
func IsDetach(ev core.Event) bool {
return ev.Label(0).Key() == keys.Detach
}
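A hedged end-to-end sketch of this package's API (again assuming a caller inside the x/tools module, since the event packages are internal): install a toy exporter, then emit a span and a log event.
package example

import (
    "context"
    "fmt"

    "golang.org/x/tools/internal/event"
    "golang.org/x/tools/internal/event/core"
    "golang.org/x/tools/internal/event/label"
)

func init() {
    // A toy exporter that prints log events and passes the context through.
    event.SetExporter(func(ctx context.Context, ev core.Event, lm label.Map) context.Context {
        if event.IsLog(ev) {
            fmt.Printf("%v\n", ev)
        }
        return ctx
    })
}

func work(ctx context.Context) {
    ctx, done := event.Start(ctx, "work")
    defer done()
    // ... do the actual work ...
    event.Log(ctx, "work finished")
}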

View File

@ -1,564 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package keys
import (
"fmt"
"io"
"math"
"strconv"
"golang.org/x/tools/internal/event/label"
)
// Value represents a key for untyped values.
type Value struct {
name string
description string
}
// New creates a new Key for untyped values.
func New(name, description string) *Value {
return &Value{name: name, description: description}
}
func (k *Value) Name() string { return k.name }
func (k *Value) Description() string { return k.description }
func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
fmt.Fprint(w, k.From(l))
}
// Get can be used to get a label for the key from a label.Map.
func (k *Value) Get(lm label.Map) interface{} {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return nil
}
// From can be used to get a value from a Label.
func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
// Of creates a new Label with this key and the supplied value.
func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) }
// Tag represents a key for tagging labels that have no value.
// These are used when the existence of the label is the entire information it
// carries, such as marking events to be of a specific kind, or from a specific
// package.
type Tag struct {
name string
description string
}
// NewTag creates a new Key for tagging labels.
func NewTag(name, description string) *Tag {
return &Tag{name: name, description: description}
}
func (k *Tag) Name() string { return k.name }
func (k *Tag) Description() string { return k.description }
func (k *Tag) Format(w io.Writer, buf []byte, l label.Label) {}
// New creates a new Label with this key.
func (k *Tag) New() label.Label { return label.OfValue(k, nil) }
// Int represents a key for int values.
type Int struct {
name string
description string
}
// NewInt creates a new Key for int values.
func NewInt(name, description string) *Int {
return &Int{name: name, description: description}
}
func (k *Int) Name() string { return k.name }
func (k *Int) Description() string { return k.description }
func (k *Int) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int) Of(v int) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int) Get(lm label.Map) int {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int) From(t label.Label) int { return int(t.Unpack64()) }
// Int8 represents a key for int8 values.
type Int8 struct {
name string
description string
}
// NewInt8 creates a new Key for int8 values.
func NewInt8(name, description string) *Int8 {
return &Int8{name: name, description: description}
}
func (k *Int8) Name() string { return k.name }
func (k *Int8) Description() string { return k.description }
func (k *Int8) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int8) Of(v int8) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int8) Get(lm label.Map) int8 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int8) From(t label.Label) int8 { return int8(t.Unpack64()) }
// Int16 represents a key for int16 values.
type Int16 struct {
name string
description string
}
// NewInt16 creates a new Key for int16 values.
func NewInt16(name, description string) *Int16 {
return &Int16{name: name, description: description}
}
func (k *Int16) Name() string { return k.name }
func (k *Int16) Description() string { return k.description }
func (k *Int16) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int16) Of(v int16) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int16) Get(lm label.Map) int16 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int16) From(t label.Label) int16 { return int16(t.Unpack64()) }
// Int32 represents a key for int32 values.
type Int32 struct {
name string
description string
}
// NewInt32 creates a new Key for int32 values.
func NewInt32(name, description string) *Int32 {
return &Int32{name: name, description: description}
}
func (k *Int32) Name() string { return k.name }
func (k *Int32) Description() string { return k.description }
func (k *Int32) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, int64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int32) Of(v int32) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int32) Get(lm label.Map) int32 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int32) From(t label.Label) int32 { return int32(t.Unpack64()) }
// Int64 represents a key for int64 values.
type Int64 struct {
name string
description string
}
// NewInt64 creates a new Key for int64 values.
func NewInt64(name, description string) *Int64 {
return &Int64{name: name, description: description}
}
func (k *Int64) Name() string { return k.name }
func (k *Int64) Description() string { return k.description }
func (k *Int64) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendInt(buf, k.From(l), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *Int64) Of(v int64) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *Int64) Get(lm label.Map) int64 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Int64) From(t label.Label) int64 { return int64(t.Unpack64()) }
// UInt represents a key for uint values.
type UInt struct {
name string
description string
}
// NewUInt creates a new Key for uint values.
func NewUInt(name, description string) *UInt {
return &UInt{name: name, description: description}
}
func (k *UInt) Name() string { return k.name }
func (k *UInt) Description() string { return k.description }
func (k *UInt) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt) Of(v uint) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt) Get(lm label.Map) uint {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt) From(t label.Label) uint { return uint(t.Unpack64()) }
// UInt8 represents a key for uint8 values.
type UInt8 struct {
name string
description string
}
// NewUInt8 creates a new Key for uint8 values.
func NewUInt8(name, description string) *UInt8 {
return &UInt8{name: name, description: description}
}
func (k *UInt8) Name() string { return k.name }
func (k *UInt8) Description() string { return k.description }
func (k *UInt8) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt8) Of(v uint8) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt8) Get(lm label.Map) uint8 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt8) From(t label.Label) uint8 { return uint8(t.Unpack64()) }
// UInt16 represents a key for uint16 values.
type UInt16 struct {
name string
description string
}
// NewUInt16 creates a new Key for uint16 values.
func NewUInt16(name, description string) *UInt16 {
return &UInt16{name: name, description: description}
}
func (k *UInt16) Name() string { return k.name }
func (k *UInt16) Description() string { return k.description }
func (k *UInt16) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt16) Of(v uint16) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt16) Get(lm label.Map) uint16 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt16) From(t label.Label) uint16 { return uint16(t.Unpack64()) }
// UInt32 represents a key for uint32 values.
type UInt32 struct {
name string
description string
}
// NewUInt32 creates a new Key for uint32 values.
func NewUInt32(name, description string) *UInt32 {
return &UInt32{name: name, description: description}
}
func (k *UInt32) Name() string { return k.name }
func (k *UInt32) Description() string { return k.description }
func (k *UInt32) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, uint64(k.From(l)), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt32) Of(v uint32) label.Label { return label.Of64(k, uint64(v)) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt32) Get(lm label.Map) uint32 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt32) From(t label.Label) uint32 { return uint32(t.Unpack64()) }
// UInt64 represents a key for uint64 values.
type UInt64 struct {
name string
description string
}
// NewUInt64 creates a new Key for uint64 values.
func NewUInt64(name, description string) *UInt64 {
return &UInt64{name: name, description: description}
}
func (k *UInt64) Name() string { return k.name }
func (k *UInt64) Description() string { return k.description }
func (k *UInt64) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendUint(buf, k.From(l), 10))
}
// Of creates a new Label with this key and the supplied value.
func (k *UInt64) Of(v uint64) label.Label { return label.Of64(k, v) }
// Get can be used to get a label for the key from a label.Map.
func (k *UInt64) Get(lm label.Map) uint64 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *UInt64) From(t label.Label) uint64 { return t.Unpack64() }
// Float32 represents a key for float32 values.
type Float32 struct {
name string
description string
}
// NewFloat32 creates a new Key for float32 values.
func NewFloat32(name, description string) *Float32 {
return &Float32{name: name, description: description}
}
func (k *Float32) Name() string { return k.name }
func (k *Float32) Description() string { return k.description }
func (k *Float32) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendFloat(buf, float64(k.From(l)), 'E', -1, 32))
}
// Of creates a new Label with this key and the supplied value.
func (k *Float32) Of(v float32) label.Label {
return label.Of64(k, uint64(math.Float32bits(v)))
}
// Get can be used to get a label for the key from a label.Map.
func (k *Float32) Get(lm label.Map) float32 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Float32) From(t label.Label) float32 {
return math.Float32frombits(uint32(t.Unpack64()))
}
// Float64 represents a key for float64 values.
type Float64 struct {
name string
description string
}
// NewFloat64 creates a new Key for float64 values.
func NewFloat64(name, description string) *Float64 {
return &Float64{name: name, description: description}
}
func (k *Float64) Name() string { return k.name }
func (k *Float64) Description() string { return k.description }
func (k *Float64) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendFloat(buf, k.From(l), 'E', -1, 64))
}
// Of creates a new Label with this key and the supplied value.
func (k *Float64) Of(v float64) label.Label {
return label.Of64(k, math.Float64bits(v))
}
// Get can be used to get a label for the key from a label.Map.
func (k *Float64) Get(lm label.Map) float64 {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return 0
}
// From can be used to get a value from a Label.
func (k *Float64) From(t label.Label) float64 {
return math.Float64frombits(t.Unpack64())
}
// String represents a key for string values.
type String struct {
name string
description string
}
// NewString creates a new Key for string values.
func NewString(name, description string) *String {
return &String{name: name, description: description}
}
func (k *String) Name() string { return k.name }
func (k *String) Description() string { return k.description }
func (k *String) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendQuote(buf, k.From(l)))
}
// Of creates a new Label with this key and the supplied value.
func (k *String) Of(v string) label.Label { return label.OfString(k, v) }
// Get can be used to get a label for the key from a label.Map.
func (k *String) Get(lm label.Map) string {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return ""
}
// From can be used to get a value from a Label.
func (k *String) From(t label.Label) string { return t.UnpackString() }
// Boolean represents a key for bool values.
type Boolean struct {
name string
description string
}
// NewBoolean creates a new Key for bool values.
func NewBoolean(name, description string) *Boolean {
return &Boolean{name: name, description: description}
}
func (k *Boolean) Name() string { return k.name }
func (k *Boolean) Description() string { return k.description }
func (k *Boolean) Format(w io.Writer, buf []byte, l label.Label) {
w.Write(strconv.AppendBool(buf, k.From(l)))
}
// Of creates a new Label with this key and the supplied value.
func (k *Boolean) Of(v bool) label.Label {
if v {
return label.Of64(k, 1)
}
return label.Of64(k, 0)
}
// Get can be used to get a label for the key from a label.Map.
func (k *Boolean) Get(lm label.Map) bool {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return false
}
// From can be used to get a value from a Label.
func (k *Boolean) From(t label.Label) bool { return t.Unpack64() > 0 }
// Error represents a key for error values.
type Error struct {
name string
description string
}
// NewError creates a new Key for error values.
func NewError(name, description string) *Error {
return &Error{name: name, description: description}
}
func (k *Error) Name() string { return k.name }
func (k *Error) Description() string { return k.description }
func (k *Error) Format(w io.Writer, buf []byte, l label.Label) {
io.WriteString(w, k.From(l).Error())
}
// Of creates a new Label with this key and the supplied value.
func (k *Error) Of(v error) label.Label { return label.OfValue(k, v) }
// Get can be used to get a label for the key from a label.Map.
func (k *Error) Get(lm label.Map) error {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
return nil
}
// From can be used to get a value from a Label.
func (k *Error) From(t label.Label) error {
err, _ := t.UnpackValue().(error)
return err
}
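A hedged sketch of the typed-key round trip described by the Of/Get/From comments above (assuming a caller inside the x/tools module; the key name is illustrative): Of packs a value into a Label, From unpacks it, and Get looks it up through a label.Map.
package example

import (
    "fmt"

    "golang.org/x/tools/internal/event/keys"
    "golang.org/x/tools/internal/event/label"
)

var files = keys.NewInt("files", "number of files compiled")

func demo() {
    l := files.Of(42)          // pack an int into a Label
    m := label.NewMap(l)       // wrap it in a Map indexed by key
    fmt.Println(files.From(l)) // 42, unpacked directly from the Label
    fmt.Println(files.Get(m))  // 42, looked up through the Map
    fmt.Println(files.Get(label.NewMap())) // 0 when the key is absent
}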

View File

@ -1,22 +0,0 @@
// Copyright 2020 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package keys
var (
// Msg is a key used to add message strings to label lists.
Msg = NewString("message", "a readable message")
// Label is a key used to indicate an event adds labels to the context.
Label = NewTag("label", "a label context marker")
// Start is used for things like traces that have a name.
Start = NewString("start", "span start")
// End is a key used to mark the end of a span.
End = NewTag("end", "a span end marker")
// Detach is a key used to indicate an event detaches a context from its span.
Detach = NewTag("detach", "a span detach marker")
// Err is a key used to add error values to label lists.
Err = NewError("error", "an error that occurred")
// Metric is a key used to indicate an event records metrics.
Metric = NewTag("metric", "a metric event marker")
)

View File

@ -1,21 +0,0 @@
// Copyright 2023 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package keys
import (
"sort"
"strings"
)
// Join returns a canonical join of the keys in S:
// a sorted comma-separated string list.
func Join[S ~[]T, T ~string](s S) string {
strs := make([]string, 0, len(s))
for _, v := range s {
strs = append(strs, string(v))
}
sort.Strings(strs)
return strings.Join(strs, ",")
}
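A small hedged example (element type and values are illustrative): Join produces the same canonical string regardless of input order, for any slice whose element type has an underlying string type.
package example

import (
    "fmt"

    "golang.org/x/tools/internal/event/keys"
)

type feature string // any type with underlying type string works

func demo() {
    fmt.Println(keys.Join([]feature{"b", "a", "c"})) // "a,b,c"
    fmt.Println(keys.Join([]string{"c", "a", "b"}))  // "a,b,c"
}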

View File

@ -1,215 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package label
import (
"fmt"
"io"
"reflect"
"unsafe"
)
// Key is used as the identity of a Label.
// Keys are intended to be compared by pointer only; the name should be unique
// for communicating with external systems, but that is not required or enforced.
type Key interface {
// Name returns the key name.
Name() string
// Description returns a string that can be used to describe the value.
Description() string
// Format is used in formatting to append the value of the label to the
// supplied buffer.
// The formatter may use the supplied buf as a scratch area to avoid
// allocations.
Format(w io.Writer, buf []byte, l Label)
}
// Label holds a key and value pair.
// It is normally used when passing around lists of labels.
type Label struct {
key Key
packed uint64
untyped interface{}
}
// Map is the interface to a collection of Labels indexed by key.
type Map interface {
// Find returns the label that matches the supplied key.
Find(key Key) Label
}
// List is the interface to something that provides an iterable
// list of labels.
// Iteration should start from 0 and continue until Valid returns false.
type List interface {
// Valid returns true if the index is within range for the list.
// It does not imply the label at that index will itself be valid.
Valid(index int) bool
// Label returns the label at the given index.
Label(index int) Label
}
// list implements List for a list of Labels.
type list struct {
labels []Label
}
// filter wraps a List, filtering out specific labels.
type filter struct {
keys []Key
underlying List
}
// listMap implements Map for a simple list of labels.
type listMap struct {
labels []Label
}
// mapChain implements Map for a list of underlying Maps.
type mapChain struct {
maps []Map
}
// OfValue creates a new label from the key and value.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
// UnpackValue assumes the label was built using OfValue and returns the value
// that was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) UnpackValue() interface{} { return t.untyped }
// Of64 creates a new label from a key and a uint64. This is often
// used for non-uint64 values that can be packed into a uint64.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func Of64(k Key, v uint64) Label { return Label{key: k, packed: v} }
// Unpack64 assumes the label was built using Of64 and returns the value that
// was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) Unpack64() uint64 { return t.packed }
type stringptr unsafe.Pointer
// OfString creates a new label from a key and a string.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
func OfString(k Key, v string) Label {
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
return Label{
key: k,
packed: uint64(hdr.Len),
untyped: stringptr(hdr.Data),
}
}
// UnpackString assumes the label was built using OfString and returns the
// value that was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
func (t Label) UnpackString() string {
var v string
hdr := (*reflect.StringHeader)(unsafe.Pointer(&v))
hdr.Data = uintptr(t.untyped.(stringptr))
hdr.Len = int(t.packed)
return v
}
// Valid returns true if the Label is a valid one (it has a key).
func (t Label) Valid() bool { return t.key != nil }
// Key returns the key of this Label.
func (t Label) Key() Key { return t.key }
// Format is used for debug printing of labels.
func (t Label) Format(f fmt.State, r rune) {
if !t.Valid() {
io.WriteString(f, `nil`)
return
}
io.WriteString(f, t.Key().Name())
io.WriteString(f, "=")
var buf [128]byte
t.Key().Format(f, buf[:0], t)
}
func (l *list) Valid(index int) bool {
return index >= 0 && index < len(l.labels)
}
func (l *list) Label(index int) Label {
return l.labels[index]
}
func (f *filter) Valid(index int) bool {
return f.underlying.Valid(index)
}
func (f *filter) Label(index int) Label {
l := f.underlying.Label(index)
for _, f := range f.keys {
if l.Key() == f {
return Label{}
}
}
return l
}
func (lm listMap) Find(key Key) Label {
for _, l := range lm.labels {
if l.Key() == key {
return l
}
}
return Label{}
}
func (c mapChain) Find(key Key) Label {
for _, src := range c.maps {
l := src.Find(key)
if l.Valid() {
return l
}
}
return Label{}
}
var emptyList = &list{}
func NewList(labels ...Label) List {
if len(labels) == 0 {
return emptyList
}
return &list{labels: labels}
}
func Filter(l List, keys ...Key) List {
if len(keys) == 0 {
return l
}
return &filter{keys: keys, underlying: l}
}
func NewMap(labels ...Label) Map {
return listMap{labels: labels}
}
func MergeMaps(srcs ...Map) Map {
var nonNil []Map
for _, src := range srcs {
if src != nil {
nonNil = append(nonNil, src)
}
}
if len(nonNil) == 1 {
return nonNil[0]
}
return mapChain{maps: nonNil}
}
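A hedged sketch of Map composition (assuming a caller inside the x/tools module; the key is illustrative): MergeMaps consults its sources in order, so the first map holding a valid label for a key wins.
package example

import (
    "fmt"

    "golang.org/x/tools/internal/event/keys"
    "golang.org/x/tools/internal/event/label"
)

var verb = keys.NewString("verb", "an RPC verb")

func demo() {
    a := label.NewMap(verb.Of("from-a"))
    b := label.NewMap(verb.Of("from-b"))
    merged := label.MergeMaps(a, b)
    fmt.Println(verb.Get(merged)) // "from-a": the first valid match wins
}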

View File

@ -1,89 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file contains the remaining vestiges of
// $GOROOT/src/go/internal/gcimporter/bimport.go.
package gcimporter
import (
"fmt"
"go/token"
"go/types"
"sync"
)
func errorf(format string, args ...interface{}) {
panic(fmt.Sprintf(format, args...))
}
const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
// Synthesize a token.Pos
type fakeFileSet struct {
fset *token.FileSet
files map[string]*fileInfo
}
type fileInfo struct {
file *token.File
lastline int
}
const maxlines = 64 * 1024
func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
// TODO(mdempsky): Make use of column.
// Since we don't know the set of needed file positions, we reserve maxlines
// positions per file. We delay calling token.File.SetLines until all
// positions have been calculated (by way of fakeFileSet.setLines), so that
// we can avoid setting unnecessary lines. See also golang/go#46586.
f := s.files[file]
if f == nil {
f = &fileInfo{file: s.fset.AddFile(file, -1, maxlines)}
s.files[file] = f
}
if line > maxlines {
line = 1
}
if line > f.lastline {
f.lastline = line
}
// Return a fake position assuming that f.file consists only of newlines.
return token.Pos(f.file.Base() + line - 1)
}
func (s *fakeFileSet) setLines() {
fakeLinesOnce.Do(func() {
fakeLines = make([]int, maxlines)
for i := range fakeLines {
fakeLines[i] = i
}
})
for _, f := range s.files {
f.file.SetLines(fakeLines[:f.lastline])
}
}
var (
fakeLines []int
fakeLinesOnce sync.Once
)
func chanDir(d int) types.ChanDir {
// tag values must match the constants in cmd/compile/internal/gc/go.go
switch d {
case 1 /* Crecv */ :
return types.RecvOnly
case 2 /* Csend */ :
return types.SendOnly
case 3 /* Cboth */ :
return types.SendRecv
default:
errorf("unexpected channel dir %d", d)
return 0
}
}

View File

@ -1,421 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
// This file also additionally implements FindExportData for gcexportdata.NewReader.
package gcimporter
import (
"bufio"
"bytes"
"errors"
"fmt"
"go/build"
"io"
"os"
"os/exec"
"path/filepath"
"strings"
"sync"
)
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying cmd/compile created archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function.
// This returns the length of the export data in bytes.
//
// This function is needed by [gcexportdata.Read], which must
// accept inputs produced by the last two releases of cmd/compile,
// plus tip.
func FindExportData(r *bufio.Reader) (size int64, err error) {
arsize, err := FindPackageDefinition(r)
if err != nil {
return
}
size = int64(arsize)
objapi, headers, err := ReadObjectHeaders(r)
if err != nil {
return
}
size -= int64(len(objapi))
for _, h := range headers {
size -= int64(len(h))
}
// Check for the binary export data section header "$$B\n".
// TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
line, err := r.ReadSlice('\n')
if err != nil {
return
}
hdr := string(line)
if hdr != "$$B\n" {
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
size -= int64(len(hdr))
// For files with a binary export data header "$$B\n",
// these are always terminated by an end-of-section marker "\n$$\n".
// So the last bytes must always be this constant.
//
// The end-of-section marker is not a part of the export data itself.
// Do not include these in size.
//
// It would be nice to have a sanity check that the final bytes after
// the export data are indeed the end-of-section marker. The split
// of gcexportdata.NewReader and gcexportdata.Read makes checking this
// ugly, so gcimporter gives up enforcing it. The compiler and go/types
// importer do enforce this, which seems good enough.
const endofsection = "\n$$\n"
size -= int64(len(endofsection))
if size < 0 {
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
return
}
return
}
// ReadUnified reads the contents of the unified export data from a reader r
// that contains the contents of a GC-created archive file.
//
// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
//
// Supported GC-created archive files have 4 layers of nesting:
// - An archive file containing a package definition file.
// - The package definition file contains headers followed by a data section.
// Headers are lines (≤ 4kb) that do not start with "$$".
// - The data section starts with "$$B\n" followed by export data followed
// by an end of section marker "\n$$\n". (The section start "$$\n" is no
// longer supported.)
// - The export data starts with a format byte ('u') followed by the <data> in
// the given format. (See ReadExportDataHeader for older formats.)
//
// Putting this together, the bytes in a GC-created archive files are expected
// to look like the following.
// See cmd/internal/archive for more details on ar file headers.
//
// | <!arch>\n | ar file signature
// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
// | go object <...>\n | objabi header
// | <optional headers>\n | other headers such as build id
// | $$B\n | binary format marker
// | u<data>\n | unified export <data>
// | $$\n | end-of-section marker
// | [optional padding] | padding byte (0x0A) if size is odd
// | [ar file header] | other ar files
// | [ar file data] |
func ReadUnified(r *bufio.Reader) (data []byte, err error) {
// We historically guaranteed headers at the default buffer size (4096) work.
// This ensures we can use ReadSlice throughout.
const minBufferSize = 4096
r = bufio.NewReaderSize(r, minBufferSize)
size, err := FindPackageDefinition(r)
if err != nil {
return
}
n := size
objapi, headers, err := ReadObjectHeaders(r)
if err != nil {
return
}
n -= len(objapi)
for _, h := range headers {
n -= len(h)
}
hdrlen, err := ReadExportDataHeader(r)
if err != nil {
return
}
n -= hdrlen
// size also includes the end of section marker. Remove that many bytes from the end.
const marker = "\n$$\n"
n -= len(marker)
if n < 0 {
err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
return
}
// Read n bytes from buf.
data = make([]byte, n)
_, err = io.ReadFull(r, data)
if err != nil {
return
}
// Check for marker at the end.
var suffix [len(marker)]byte
_, err = io.ReadFull(r, suffix[:])
if err != nil {
return
}
if s := string(suffix[:]); s != marker {
err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
return
}
return
}
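In practice these internals are reached through the exported golang.org/x/tools/go/gcexportdata package. The following hedged sketch (the import path "fmt" is an arbitrary example, and Find requires a Go toolchain) shows the exported entry points that sit on top of these helpers.
package main

import (
    "fmt"
    "go/token"
    "go/types"
    "log"
    "os"

    "golang.org/x/tools/go/gcexportdata"
)

func main() {
    // Locate the export data for a package.
    filename, path := gcexportdata.Find("fmt", ".")
    if filename == "" {
        log.Fatalf("no export data found for %s", path)
    }
    f, err := os.Open(filename)
    if err != nil {
        log.Fatal(err)
    }
    defer f.Close()

    // NewReader skips the archive framing described above; Read then decodes
    // the export data into a *types.Package.
    r, err := gcexportdata.NewReader(f)
    if err != nil {
        log.Fatal(err)
    }
    fset := token.NewFileSet()
    imports := make(map[string]*types.Package)
    pkg, err := gcexportdata.Read(r, fset, imports, path)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(pkg.Path(), "has", len(pkg.Scope().Names()), "top-level names")
}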
// FindPackageDefinition positions the reader r at the beginning of a package
// definition file ("__.PKGDEF") within a GC-created archive by reading
// from it, and returns the size of the package definition file in the archive.
//
// The reader must be positioned at the start of the archive file before calling
// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
//
// See cmd/internal/archive for details on the archive format.
func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
// Uses ReadSlice to limit risk of malformed inputs.
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
// Is the first line an archive file signature?
if string(line) != "!<arch>\n" {
err = fmt.Errorf("not the start of an archive file (%q)", line)
return
}
// package export block should be first
size = readArchiveHeader(r, "__.PKGDEF")
if size <= 0 {
err = fmt.Errorf("not a package file")
return
}
return
}
// ReadObjectHeaders reads object headers from the reader. Object headers are
// lines that do not start with an end-of-section marker "$$". The first header
// is the objabi header. On success, the reader will be positioned at the beginning
// of the end-of-section marker.
//
// It returns an error if any header does not fit in r.Size() bytes.
func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
// line is a temporary buffer for headers.
// Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
var line []byte
// objapi header should be the first line
if line, err = r.ReadSlice('\n'); err != nil {
err = fmt.Errorf("can't find export data (%v)", err)
return
}
objapi = string(line)
// objapi header begins with "go object ".
if !strings.HasPrefix(objapi, "go object ") {
err = fmt.Errorf("not a go object file: %s", objapi)
return
}
// process remaining object header lines
for {
// check for an end of section marker "$$"
line, err = r.Peek(2)
if err != nil {
return
}
if string(line) == "$$" {
return // stop
}
// read next header
line, err = r.ReadSlice('\n')
if err != nil {
return
}
headers = append(headers, string(line))
}
}
// ReadExportDataHeader reads the export data header and format from r.
// It returns the number of bytes read, or an error if the format is no longer
// supported or it failed to read.
//
// The only currently supported format is binary export data in the
// unified export format.
func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
// Read export data header.
line, err := r.ReadSlice('\n')
if err != nil {
return
}
hdr := string(line)
switch hdr {
case "$$\n":
err = fmt.Errorf("old textual export format no longer supported (recompile package)")
return
case "$$B\n":
var format byte
format, err = r.ReadByte()
if err != nil {
return
}
// The unified export format starts with a 'u'.
switch format {
case 'u':
default:
// Older no longer supported export formats include:
// indexed export format which started with an 'i'; and
// the older binary export format which started with a 'c',
// 'd', or 'v' (from "version").
err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
return
}
default:
err = fmt.Errorf("unknown export data header: %q", hdr)
return
}
n = len(hdr) + 1 // + 1 is for 'u'
return
}
// FindPkg returns the filename and unique package id for an import
// path based on package information provided by build.Import (using
// the build.Default build.Context). A relative srcDir is interpreted
// relative to the current working directory.
//
// FindPkg is only used in tests within x/tools.
func FindPkg(path, srcDir string) (filename, id string, err error) {
// TODO(taking): Move internal/exportdata.FindPkg into its own file,
// and then this copy into a _test package.
if path == "" {
return "", "", errors.New("path is empty")
}
var noext string
switch {
default:
// "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
// Don't require the source files to be present.
if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
srcDir = abs
}
var bp *build.Package
bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
if bp.PkgObj == "" {
if bp.Goroot && bp.Dir != "" {
filename, err = lookupGorootExport(bp.Dir)
if err == nil {
_, err = os.Stat(filename)
}
if err == nil {
return filename, bp.ImportPath, nil
}
}
goto notfound
} else {
noext = strings.TrimSuffix(bp.PkgObj, ".a")
}
id = bp.ImportPath
case build.IsLocalImport(path):
// "./x" -> "/this/directory/x.ext", "/this/directory/x"
noext = filepath.Join(srcDir, path)
id = noext
case filepath.IsAbs(path):
// for completeness only - go/build.Import
// does not support absolute imports
// "/x" -> "/x.ext", "/x"
noext = path
id = path
}
if false { // for debugging
if path != id {
fmt.Printf("%s -> %s\n", path, id)
}
}
// try extensions
for _, ext := range pkgExts {
filename = noext + ext
f, statErr := os.Stat(filename)
if statErr == nil && !f.IsDir() {
return filename, id, nil
}
if err == nil {
err = statErr
}
}
notfound:
if err == nil {
return "", path, fmt.Errorf("can't find import: %q", path)
}
return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
}
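// Hypothetical usage sketch (not part of the original file): locating export
// data for an import path. The import path "fmt" and srcDir "." are
// illustrative assumptions.
//
//	filename, id, err := FindPkg("fmt", ".")
//	if err == nil {
//		fmt.Println(filename, id) // e.g. a file from GOPATH/pkg or the build cache
//	}
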
var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
var exportMap sync.Map // package dir → func() (string, error)
// lookupGorootExport returns the location of the export data
// (normally found in the build cache, but located in GOROOT/pkg
// in prior Go releases) for the package located in pkgDir.
//
// (We use the package's directory instead of its import path
// mainly to simplify handling of the packages in src/vendor
// and cmd/vendor.)
//
// lookupGorootExport is only used in tests within x/tools.
func lookupGorootExport(pkgDir string) (string, error) {
f, ok := exportMap.Load(pkgDir)
if !ok {
var (
listOnce sync.Once
exportPath string
err error
)
f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
listOnce.Do(func() {
cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
cmd.Dir = build.Default.GOROOT
cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
var output []byte
output, err = cmd.Output()
if err != nil {
if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
err = errors.New(string(ee.Stderr))
}
return
}
exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
if len(exports) != 1 {
err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
return
}
exportPath = exports[0]
})
return exportPath, err
})
}
return f.(func() (string, error))()
}
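// Hypothetical usage sketch (not part of the original file): resolving the
// export data of a GOROOT package by its source directory. The package
// directory below is an illustrative assumption.
//
//	dir := filepath.Join(build.Default.GOROOT, "src", "fmt")
//	exportFile, err := lookupGorootExport(dir) // runs "go list -export" under the hood
//	_, _ = exportFile, err
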

View File

@ -1,108 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// This file is a reduced copy of $GOROOT/src/go/internal/gcimporter/gcimporter.go.
// Package gcimporter provides various functions for reading
// gc-generated object files that can be used to implement the
// Importer interface defined by the Go 1.5 standard library package.
//
// The encoding is deterministic: if the encoder is applied twice to
// the same types.Package data structure, both encodings are equal.
// This property may be important to avoid spurious changes in
// applications such as build systems.
//
// However, the encoder is not necessarily idempotent. Importing an
// exported package may yield a types.Package that, while it
// represents the same set of Go types as the original, may differ in
// the details of its internal representation. Because of these
// differences, re-encoding the imported package may yield a
// different, but equally valid, encoding of the package.
package gcimporter // import "golang.org/x/tools/internal/gcimporter"
import (
"bufio"
"fmt"
"go/token"
"go/types"
"io"
"os"
)
const (
// Enable debug during development: it adds some additional checks, and
// prevents errors from being recovered.
debug = false
// If trace is set, debugging output is printed to stdout.
trace = false
)
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
//
// Import is only used in tests.
func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
var id string
if lookup != nil {
// With custom lookup specified, assume that caller has
// converted path to a canonical import path for use in the map.
if path == "unsafe" {
return types.Unsafe, nil
}
id = path
// No need to re-import if the package was imported completely before.
if pkg = packages[id]; pkg != nil && pkg.Complete() {
return
}
f, err := lookup(path)
if err != nil {
return nil, err
}
rc = f
} else {
var filename string
filename, id, err = FindPkg(path, srcDir)
if filename == "" {
if path == "unsafe" {
return types.Unsafe, nil
}
return nil, err
}
// no need to re-import if the package was imported completely before
if pkg = packages[id]; pkg != nil && pkg.Complete() {
return
}
// open file
f, err := os.Open(filename)
if err != nil {
return nil, err
}
defer func() {
if err != nil {
// add file name to error
err = fmt.Errorf("%s: %v", filename, err)
}
}()
rc = f
}
defer rc.Close()
buf := bufio.NewReader(rc)
data, err := ReadUnified(buf)
if err != nil {
err = fmt.Errorf("import %q: %v", path, err)
return
}
// unified: emitted by cmd/compile since go1.20.
_, pkg, err = UImportData(fset, packages, data, id)
return
}
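// Hypothetical usage sketch (not part of the original file): importing a
// package with the default on-disk lookup (lookup == nil). The import path
// "fmt" and srcDir "." are illustrative assumptions.
//
//	fset := token.NewFileSet()
//	packages := make(map[string]*types.Package)
//	pkg, err := Import(fset, packages, "fmt", ".", nil)
//	if err == nil {
//		fmt.Println(pkg.Path())
//	}
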

Some files were not shown because too many files have changed in this diff.