
chore: go mod vendor / tidy

Commit b9f2d1f568 (parent a379b31a19), authored 2025-01-03 20:21:06 +01:00
182 changed files with 184390 additions and 209 deletions

vendor/github.com/mmcloughlin/avo/pass/alloc.go (new file, 213 lines, generated, vendored)

@@ -0,0 +1,213 @@
package pass
import (
"errors"
"math"
"sort"
"github.com/mmcloughlin/avo/reg"
)
// edge is an edge of the interference graph, indicating that registers X and Y
// must be in non-conflicting registers.
type edge struct {
X, Y reg.ID
}
// Allocator is a graph-coloring register allocator.
type Allocator struct {
registers []reg.ID
priority map[reg.ID]int
allocation reg.Allocation
edges []*edge
possible map[reg.ID][]reg.ID
}
// NewAllocator builds an allocator for the given physical registers.
func NewAllocator(rs []reg.Physical) (*Allocator, error) {
// Set of IDs, excluding restricted registers.
idset := map[reg.ID]bool{}
for _, r := range rs {
if (r.Info() & reg.Restricted) != 0 {
continue
}
idset[r.ID()] = true
}
if len(idset) == 0 {
return nil, errors.New("no allocatable registers")
}
// Produce slice of unique register IDs.
var ids []reg.ID
for id := range idset {
ids = append(ids, id)
}
a := &Allocator{
registers: ids,
priority: map[reg.ID]int{},
allocation: reg.NewEmptyAllocation(),
possible: map[reg.ID][]reg.ID{},
}
a.sortregisters()
return a, nil
}
// NewAllocatorForKind builds an allocator for the given kind of registers.
func NewAllocatorForKind(k reg.Kind) (*Allocator, error) {
f := reg.FamilyOfKind(k)
if f == nil {
return nil, errors.New("unknown register family")
}
return NewAllocator(f.Registers())
}
// SetPriority sets the priority of the given register to p. Higher priority
// registers are preferred in allocations. By default all registers have 0
// priority. Priority will only apply to subsequent Add() calls, therefore
// typically all SetPriority calls should happen at allocator initialization.
func (a *Allocator) SetPriority(id reg.ID, p int) {
a.priority[id] = p
a.sortregisters()
}
// sortregisters sorts the list of available registers: higher priority first,
// falling back to sorting by ID.
func (a *Allocator) sortregisters() {
sort.Slice(a.registers, func(i, j int) bool {
ri, rj := a.registers[i], a.registers[j]
pi, pj := a.priority[ri], a.priority[rj]
return (pi > pj) || (pi == pj && ri < rj)
})
}
// AddInterferenceSet records that r interferes with every register in s. Convenience wrapper around AddInterference.
func (a *Allocator) AddInterferenceSet(r reg.Register, s reg.MaskSet) {
for id, mask := range s {
if (r.Mask() & mask) != 0 {
a.AddInterference(r.ID(), id)
}
}
}
// AddInterference records that x and y must be assigned to non-conflicting physical registers.
func (a *Allocator) AddInterference(x, y reg.ID) {
a.Add(x)
a.Add(y)
a.edges = append(a.edges, &edge{X: x, Y: y})
}
// Add adds a register to be allocated. Does nothing if the register has already been added.
func (a *Allocator) Add(v reg.ID) {
if !v.IsVirtual() {
return
}
if _, found := a.possible[v]; found {
return
}
a.possible[v] = a.possibleregisters(v)
}
// Allocate allocates physical registers.
func (a *Allocator) Allocate() (reg.Allocation, error) {
for {
if err := a.update(); err != nil {
return nil, err
}
if a.remaining() == 0 {
break
}
v := a.mostrestricted()
if err := a.alloc(v); err != nil {
return nil, err
}
}
return a.allocation, nil
}
// update possible allocations based on edges.
func (a *Allocator) update() error {
var rem []*edge
for _, e := range a.edges {
x := a.allocation.LookupDefault(e.X)
y := a.allocation.LookupDefault(e.Y)
switch {
case x.IsVirtual() && y.IsVirtual():
rem = append(rem, e)
continue
case x.IsPhysical() && y.IsPhysical():
if x == y {
return errors.New("impossible register allocation")
}
case x.IsPhysical() && y.IsVirtual():
a.discardconflicting(y, x)
case x.IsVirtual() && y.IsPhysical():
a.discardconflicting(x, y)
default:
panic("unreachable")
}
}
a.edges = rem
return nil
}
// mostrestricted returns the virtual register with the fewest remaining possibilities.
func (a *Allocator) mostrestricted() reg.ID {
n := int(math.MaxInt32)
var v reg.ID
for w, p := range a.possible {
// On a tie, choose the smallest ID in numeric order. This avoids
// non-deterministic allocations due to map iteration order.
if len(p) < n || (len(p) == n && w < v) {
n = len(p)
v = w
}
}
return v
}
// discardconflicting removes registers from v's possible list that conflict with p.
func (a *Allocator) discardconflicting(v, p reg.ID) {
a.possible[v] = filterregisters(a.possible[v], func(r reg.ID) bool {
return r != p
})
}
// alloc attempts to allocate a register to v.
func (a *Allocator) alloc(v reg.ID) error {
ps := a.possible[v]
if len(ps) == 0 {
return errors.New("failed to allocate registers")
}
p := ps[0]
a.allocation[v] = p
delete(a.possible, v)
return nil
}
// remaining returns the number of unallocated registers.
func (a *Allocator) remaining() int {
return len(a.possible)
}
// possibleregisters returns all allocatable registers for the given virtual register.
func (a *Allocator) possibleregisters(v reg.ID) []reg.ID {
return filterregisters(a.registers, func(r reg.ID) bool {
return v.Kind() == r.Kind()
})
}
func filterregisters(in []reg.ID, predicate func(reg.ID) bool) []reg.ID {
var rs []reg.ID
for _, r := range in {
if predicate(r) {
rs = append(rs, r)
}
}
return rs
}
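The allocator is a greedy graph colorer: callers add virtual registers and interference edges, and Allocate repeatedly picks the most constrained virtual and hands it the first surviving register in priority order. A minimal usage sketch, assuming x and y are virtual general-purpose reg.IDs from the IR, and that reg.KindGP names the general-purpose register kind (not shown in this diff):

// allocatePair is a sketch: force two virtual registers into distinct
// physical registers. reg.KindGP is assumed; x and y are virtual reg.IDs.
func allocatePair(x, y reg.ID) (reg.Allocation, error) {
	a, err := NewAllocatorForKind(reg.KindGP)
	if err != nil {
		return nil, err
	}
	a.AddInterference(x, y) // x and y must not share a physical register
	return a.Allocate()
}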

vendor/github.com/mmcloughlin/avo/pass/cfg.go (new file, 81 lines, generated, vendored)

@@ -0,0 +1,81 @@
package pass
import (
"errors"
"fmt"
"github.com/mmcloughlin/avo/ir"
)
// LabelTarget populates the LabelTarget of the given function. This maps from
// label name to the following instruction.
func LabelTarget(fn *ir.Function) error {
target := map[ir.Label]*ir.Instruction{}
var pending []ir.Label
for _, node := range fn.Nodes {
switch n := node.(type) {
case ir.Label:
if _, found := target[n]; found {
return fmt.Errorf("duplicate label \"%s\"", n)
}
pending = append(pending, n)
case *ir.Instruction:
for _, label := range pending {
target[label] = n
}
pending = nil
}
}
if len(pending) != 0 {
return errors.New("function ends with label")
}
fn.LabelTarget = target
return nil
}
// CFG constructs the control-flow graph for the function.
func CFG(fn *ir.Function) error {
is := fn.Instructions()
n := len(is)
// Populate successors.
for i := 0; i < n; i++ {
cur := is[i]
var nxt *ir.Instruction
if i+1 < n {
nxt = is[i+1]
}
// If it's a branch, locate the target.
if cur.IsBranch {
lbl := cur.TargetLabel()
if lbl == nil {
return errors.New("no label for branch instruction")
}
target, found := fn.LabelTarget[*lbl]
if !found {
return fmt.Errorf("unknown label %q", *lbl)
}
cur.Succ = append(cur.Succ, target)
}
// Otherwise, could continue to the following instruction.
switch {
case cur.IsTerminal:
case cur.IsUnconditionalBranch():
default:
cur.Succ = append(cur.Succ, nxt)
}
}
// Populate predecessors.
for _, i := range is {
for _, s := range i.Succ {
if s != nil {
s.Pred = append(s.Pred, i)
}
}
}
return nil
}
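Note the dependency: CFG resolves branch targets through fn.LabelTarget, so LabelTarget must run first, exactly as the two passes are ordered in the Compile pipeline (pass.go below). A small sketch:

// buildCFG is a sketch showing the required pass ordering.
func buildCFG(fn *ir.Function) error {
	if err := LabelTarget(fn); err != nil {
		return err
	}
	return CFG(fn) // reads fn.LabelTarget populated above
}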

vendor/github.com/mmcloughlin/avo/pass/cleanup.go (new file, 123 lines, generated, vendored)

@@ -0,0 +1,123 @@
package pass
import (
"github.com/mmcloughlin/avo/ir"
"github.com/mmcloughlin/avo/operand"
)
// PruneJumpToFollowingLabel removes jump instructions that target an
// immediately following label.
func PruneJumpToFollowingLabel(fn *ir.Function) error {
for i := 0; i+1 < len(fn.Nodes); i++ {
node := fn.Nodes[i]
next := fn.Nodes[i+1]
// This node is an unconditional jump.
inst, ok := node.(*ir.Instruction)
if !ok || !inst.IsBranch || inst.IsConditional {
continue
}
target := inst.TargetLabel()
if target == nil {
continue
}
// And the jump target is the immediately following node.
lbl, ok := next.(ir.Label)
if !ok || lbl != *target {
continue
}
// Then the jump is unnecessary and can be removed.
fn.Nodes = deletenode(fn.Nodes, i)
i--
}
return nil
}
// PruneDanglingLabels removes labels that are not referenced by any branches.
func PruneDanglingLabels(fn *ir.Function) error {
// Count label references.
count := map[ir.Label]int{}
for _, n := range fn.Nodes {
i, ok := n.(*ir.Instruction)
if !ok || !i.IsBranch {
continue
}
target := i.TargetLabel()
if target == nil {
continue
}
count[*target]++
}
// Look for labels with no references.
for i := 0; i < len(fn.Nodes); i++ {
node := fn.Nodes[i]
lbl, ok := node.(ir.Label)
if !ok {
continue
}
if count[lbl] == 0 {
fn.Nodes = deletenode(fn.Nodes, i)
i--
}
}
return nil
}
// PruneSelfMoves removes move instructions from one register to itself.
func PruneSelfMoves(fn *ir.Function) error {
return removeinstructions(fn, func(i *ir.Instruction) bool {
switch i.Opcode {
case "MOVB", "MOVW", "MOVL", "MOVQ":
default:
return false
}
return operand.IsRegister(i.Operands[0]) && operand.IsRegister(i.Operands[1]) && i.Operands[0] == i.Operands[1]
})
}
// removeinstructions deletes instructions from the given function which match predicate.
func removeinstructions(fn *ir.Function, predicate func(*ir.Instruction) bool) error {
// Removal of instructions has the potential to invalidate CFG structures.
// Clear them to prevent accidental use of stale structures after this pass.
invalidatecfg(fn)
for i := 0; i < len(fn.Nodes); i++ {
n := fn.Nodes[i]
inst, ok := n.(*ir.Instruction)
if !ok || !predicate(inst) {
continue
}
fn.Nodes = deletenode(fn.Nodes, i)
// Re-test index i: deletion shifted the next node into it.
i--
}
return nil
}
// deletenode deletes node i from nodes and returns the resulting slice.
func deletenode(nodes []ir.Node, i int) []ir.Node {
n := len(nodes)
copy(nodes[i:], nodes[i+1:])
nodes[n-1] = nil
return nodes[:n-1]
}
// invalidatecfg clears CFG structures.
func invalidatecfg(fn *ir.Function) {
fn.LabelTarget = nil
for _, i := range fn.Instructions() {
i.Pred = nil
i.Succ = nil
}
}
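deletenode removes an element in place: copy shifts the tail left, and the vacated final slot is nilled so the dropped node can be garbage collected. Callers that keep scanning must re-test the current index (the i-- in the prune loops), since a new node slides into position i. An illustrative sketch, with n0 through n2 standing in for arbitrary ir.Node values:

// deleteExample is a sketch; n0, n1, n2 are hypothetical ir.Node values.
func deleteExample(n0, n1, n2 ir.Node) []ir.Node {
	nodes := []ir.Node{n0, n1, n2}
	return deletenode(nodes, 1) // [n0, n2]; the freed tail slot is nil for GC
}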

vendor/github.com/mmcloughlin/avo/pass/isa.go (new file, 31 lines, generated, vendored)

@@ -0,0 +1,31 @@
package pass
import (
"sort"
"github.com/mmcloughlin/avo/ir"
)
// RequiredISAExtensions determines ISA extensions required for the given
// function. Populates the ISA field.
func RequiredISAExtensions(fn *ir.Function) error {
// Collect ISA set.
set := map[string]bool{}
for _, i := range fn.Instructions() {
for _, isa := range i.ISA {
set[isa] = true
}
}
if len(set) == 0 {
return nil
}
// Populate the function's ISA field with the unique sorted list.
for isa := range set {
fn.ISA = append(fn.ISA, isa)
}
sort.Strings(fn.ISA)
return nil
}
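The map-then-sort step is the standard Go idiom for producing a de-duplicated, deterministic list. A sketch of the same idiom in isolation; the ISA names are hypothetical examples:

// sortedISAs is a sketch of the dedupe-and-sort step above. Given
// instructions tagged {"AVX2"} and {"AVX", "AVX2"}, the set collapses
// to two keys and the result is ["AVX", "AVX2"].
func sortedISAs(set map[string]bool) []string {
	isas := make([]string, 0, len(set))
	for isa := range set {
		isas = append(isas, isa)
	}
	sort.Strings(isas)
	return isas
}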

vendor/github.com/mmcloughlin/avo/pass/pass.go (new file, 101 lines, generated, vendored)

@@ -0,0 +1,101 @@
// Package pass implements processing passes on avo Files.
package pass
import (
"io"
"github.com/mmcloughlin/avo/ir"
"github.com/mmcloughlin/avo/printer"
)
// Compile pass compiles an avo file. Upon successful completion the avo file
// may be printed to Go assembly.
var Compile = Concat(
Verify,
FunctionPass(PruneJumpToFollowingLabel),
FunctionPass(PruneDanglingLabels),
FunctionPass(LabelTarget),
FunctionPass(CFG),
InstructionPass(ZeroExtend32BitOutputs),
FunctionPass(Liveness),
FunctionPass(AllocateRegisters),
FunctionPass(BindRegisters),
FunctionPass(VerifyAllocation),
FunctionPass(EnsureBasePointerCalleeSaved),
Func(IncludeTextFlagHeader),
FunctionPass(PruneSelfMoves),
FunctionPass(RequiredISAExtensions),
)
// Interface for a processing pass.
type Interface interface {
Execute(*ir.File) error
}
// Func adapts a function to the pass Interface.
type Func func(*ir.File) error
// Execute calls p.
func (p Func) Execute(f *ir.File) error {
return p(f)
}
// FunctionPass is a convenience for implementing a full file pass with a
// function that operates on each avo Function independently.
type FunctionPass func(*ir.Function) error
// Execute calls p on every function in the file. Exits on the first error.
func (p FunctionPass) Execute(f *ir.File) error {
for _, fn := range f.Functions() {
if err := p(fn); err != nil {
return err
}
}
return nil
}
// InstructionPass is a convenience for implementing a full file pass with a
// function that operates on each Instruction independently.
type InstructionPass func(*ir.Instruction) error
// Execute calls p on every instruction in the file. Exits on the first error.
func (p InstructionPass) Execute(f *ir.File) error {
for _, fn := range f.Functions() {
for _, i := range fn.Instructions() {
if err := p(i); err != nil {
return err
}
}
}
return nil
}
// Concat returns a pass that executes the given passes in order, stopping on the first error.
func Concat(passes ...Interface) Interface {
return Func(func(f *ir.File) error {
for _, p := range passes {
if err := p.Execute(f); err != nil {
return err
}
}
return nil
})
}
// Output pass prints a file.
type Output struct {
Writer io.WriteCloser
Printer printer.Printer
}
// Execute prints f with the configured Printer and writes output to Writer.
func (o *Output) Execute(f *ir.File) error {
b, err := o.Printer.Print(f)
if err != nil {
return err
}
if _, err = o.Writer.Write(b); err != nil {
return err
}
return o.Writer.Close()
}
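Because every adapter satisfies Interface, custom pipelines compose freely with the stock ones. A sketch that runs the full Compile pipeline and then emits Go assembly; printer.NewGoAsm and printer.NewDefaultConfig are assumed from the printer package and are not part of this diff:

// compileTo is a sketch: run Compile, then print Go assembly to w.
func compileTo(w io.WriteCloser, f *ir.File) error {
	p := Concat(
		Compile,
		&Output{Writer: w, Printer: printer.NewGoAsm(printer.NewDefaultConfig())},
	)
	return p.Execute(f)
}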

vendor/github.com/mmcloughlin/avo/pass/reg.go (new file, 223 lines, generated, vendored)

@@ -0,0 +1,223 @@
package pass
import (
"errors"
"github.com/mmcloughlin/avo/gotypes"
"github.com/mmcloughlin/avo/ir"
"github.com/mmcloughlin/avo/operand"
"github.com/mmcloughlin/avo/reg"
)
// ZeroExtend32BitOutputs applies the rule that "32-bit operands generate a
// 32-bit result, zero-extended to a 64-bit result in the destination
// general-purpose register" (Intel Software Developers Manual, Volume 1,
// 3.4.1.1).
func ZeroExtend32BitOutputs(i *ir.Instruction) error {
for j, op := range i.Outputs {
if !operand.IsR32(op) {
continue
}
r, ok := op.(reg.GP)
if !ok {
panic("r32 operand should satisfy reg.GP")
}
i.Outputs[j] = r.As64()
}
return nil
}
// Liveness computes register liveness.
func Liveness(fn *ir.Function) error {
// Note this implementation is initially naive so as to be "obviously correct".
// There are well-known optimizations we can apply if necessary.
is := fn.Instructions()
// Process instructions in reverse: poor approximation to topological sort.
// TODO(mbm): process instructions in topological sort order
for l, r := 0, len(is)-1; l < r; l, r = l+1, r-1 {
is[l], is[r] = is[r], is[l]
}
// Initialize.
for _, i := range is {
i.LiveIn = reg.NewMaskSetFromRegisters(i.InputRegisters())
i.LiveOut = reg.NewEmptyMaskSet()
}
// Iterative dataflow analysis.
for {
changes := false
for _, i := range is {
// out[n] = UNION[s IN succ[n]] in[s]
for _, s := range i.Succ {
if s == nil {
continue
}
changes = i.LiveOut.Update(s.LiveIn) || changes
}
// in[n] = use[n] UNION (out[n] - def[n])
def := reg.NewMaskSetFromRegisters(i.OutputRegisters())
changes = i.LiveIn.Update(i.LiveOut.Difference(def)) || changes
}
if !changes {
break
}
}
return nil
}
// AllocateRegisters performs register allocation.
func AllocateRegisters(fn *ir.Function) error {
// Initialize one allocator per kind.
as := map[reg.Kind]*Allocator{}
for _, i := range fn.Instructions() {
for _, r := range i.Registers() {
k := r.Kind()
if _, found := as[k]; !found {
a, err := NewAllocatorForKind(k)
if err != nil {
return err
}
as[k] = a
}
}
}
// De-prioritize the base pointer register. This can be used as a general
// purpose register, but it's callee-save so needs to be saved/restored if
// it is clobbered. For this reason we prefer to avoid using it unless
// forced to by register pressure.
for k, a := range as {
f := reg.FamilyOfKind(k)
for _, r := range f.Registers() {
if (r.Info() & reg.BasePointer) != 0 {
// Negative priority penalizes this register relative to all
// others (having default zero priority).
a.SetPriority(r.ID(), -1)
}
}
}
// Populate registers to be allocated.
for _, i := range fn.Instructions() {
for _, r := range i.Registers() {
as[r.Kind()].Add(r.ID())
}
}
// Record register interferences.
for _, i := range fn.Instructions() {
for _, d := range i.OutputRegisters() {
k := d.Kind()
out := i.LiveOut.OfKind(k)
out.DiscardRegister(d)
as[k].AddInterferenceSet(d, out)
}
}
// Execute register allocation.
fn.Allocation = reg.NewEmptyAllocation()
for _, a := range as {
al, err := a.Allocate()
if err != nil {
return err
}
if err := fn.Allocation.Merge(al); err != nil {
return err
}
}
return nil
}
// BindRegisters applies the result of register allocation, replacing all virtual registers with their assigned physical registers.
func BindRegisters(fn *ir.Function) error {
for _, i := range fn.Instructions() {
for idx := range i.Operands {
i.Operands[idx] = operand.ApplyAllocation(i.Operands[idx], fn.Allocation)
}
for idx := range i.Inputs {
i.Inputs[idx] = operand.ApplyAllocation(i.Inputs[idx], fn.Allocation)
}
for idx := range i.Outputs {
i.Outputs[idx] = operand.ApplyAllocation(i.Outputs[idx], fn.Allocation)
}
}
return nil
}
// VerifyAllocation performs sanity checks following register allocation.
func VerifyAllocation(fn *ir.Function) error {
// All registers should be physical.
for _, i := range fn.Instructions() {
for _, r := range i.Registers() {
if reg.ToPhysical(r) == nil {
return errors.New("non physical register found")
}
}
}
return nil
}
// EnsureBasePointerCalleeSaved ensures that the base pointer register will be
// saved and restored if it has been clobbered by the function.
func EnsureBasePointerCalleeSaved(fn *ir.Function) error {
// Check to see if the base pointer is written to.
clobbered := false
for _, i := range fn.Instructions() {
for _, r := range i.OutputRegisters() {
if p := reg.ToPhysical(r); p != nil && (p.Info()&reg.BasePointer) != 0 {
clobbered = true
}
}
}
if !clobbered {
return nil
}
// This function clobbers the base pointer register so we need to ensure it
// will be saved and restored. The Go assembler will do this automatically,
// with a few exceptions detailed below. In summary, we can usually ensure
// this happens by ensuring the function is not frameless (apart from
// NOFRAME functions).
//
// Reference: https://github.com/golang/go/blob/3f4977bd5800beca059defb5de4dc64cd758cbb9/src/cmd/internal/obj/x86/obj6.go#L591-L609
//
// var bpsize int
// if ctxt.Arch.Family == sys.AMD64 &&
// !p.From.Sym.NoFrame() && // (1) below
// !(autoffset == 0 && p.From.Sym.NoSplit()) && // (2) below
// !(autoffset == 0 && !hasCall) { // (3) below
// // Make room to save a base pointer.
// // There are 2 cases we must avoid:
// // 1) If noframe is set (which we do for functions which tail call).
// // 2) Scary runtime internals which would be all messed up by frame pointers.
// // We detect these using a heuristic: frameless nosplit functions.
// // TODO: Maybe someday we label them all with NOFRAME and get rid of this heuristic.
// // For performance, we also want to avoid:
// // 3) Frameless leaf functions
// bpsize = ctxt.Arch.PtrSize
// autoffset += int32(bpsize)
// p.To.Offset += int64(bpsize)
// } else {
// bpsize = 0
// }
//
if fn.Attributes.NOFRAME() {
return errors.New("NOFRAME function clobbers base pointer register")
}
if fn.LocalSize == 0 {
fn.AllocLocal(int(gotypes.PointerSize))
}
return nil
}
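These passes are order-dependent: Liveness reads the successor edges built by CFG, AllocateRegisters consumes the liveness sets, BindRegisters applies the resulting allocation, and VerifyAllocation checks the outcome. A sketch of the sequence, assuming LabelTarget and CFG have already run, as in the Compile pipeline:

// allocate is a sketch of the register pipeline's required ordering.
func allocate(fn *ir.Function) error {
	for _, i := range fn.Instructions() {
		if err := ZeroExtend32BitOutputs(i); err != nil {
			return err
		}
	}
	for _, p := range []func(*ir.Function) error{
		Liveness, AllocateRegisters, BindRegisters, VerifyAllocation,
	} {
		if err := p(fn); err != nil {
			return err
		}
	}
	return nil
}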

vendor/github.com/mmcloughlin/avo/pass/textflag.go (new file, 42 lines, generated, vendored)

@@ -0,0 +1,42 @@
package pass
import (
"github.com/mmcloughlin/avo/attr"
"github.com/mmcloughlin/avo/ir"
)
// IncludeTextFlagHeader includes textflag.h if necessary.
func IncludeTextFlagHeader(f *ir.File) error {
const textflagheader = "textflag.h"
// Check if we already have it.
for _, path := range f.Includes {
if path == textflagheader {
return nil
}
}
// Add it if necessary.
if requirestextflags(f) {
f.Includes = append(f.Includes, textflagheader)
}
return nil
}
// requirestextflags returns whether the file uses flags from the textflag.h header.
func requirestextflags(f *ir.File) bool {
for _, s := range f.Sections {
var a attr.Attribute
switch s := s.(type) {
case *ir.Function:
a = s.Attributes
case *ir.Global:
a = s.Attributes
}
if a.ContainsTextFlags() {
return true
}
}
return false
}
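For instance, any function attribute drawn from textflag.h trips ContainsTextFlags and causes the include to be added. A sketch, assuming attr.NOSPLIT is one such flag constant (not shown in this diff):

// nosplitNeedsHeader is a sketch: setting a textflag.h flag on a
// function makes requirestextflags report true for its file.
func nosplitNeedsHeader(fn *ir.Function) bool {
	fn.Attributes |= attr.NOSPLIT // hypothetical flag constant
	return fn.Attributes.ContainsTextFlags()
}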

vendor/github.com/mmcloughlin/avo/pass/verify.go (new file, 32 lines, generated, vendored)

@@ -0,0 +1,32 @@
package pass
import (
"errors"
"github.com/mmcloughlin/avo/ir"
"github.com/mmcloughlin/avo/operand"
)
// Verify pass validates an avo file.
var Verify = Concat(
InstructionPass(VerifyMemOperands),
)
// VerifyMemOperands checks the instruction's memory operands.
func VerifyMemOperands(i *ir.Instruction) error {
for _, op := range i.Operands {
m, ok := op.(operand.Mem)
if !ok {
continue
}
if m.Base == nil {
return errors.New("bad memory operand: missing base register")
}
if m.Index != nil && m.Scale == 0 {
return errors.New("bad memory operand: index register with scale 0")
}
}
return nil
}
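The checks correspond to x86 addressing-form constraints: every memory operand needs a base register, and an index register is meaningless without a scale. A sketch of operands that pass and fail, using the field names accessed above; reg.RAX and reg.RCX are the standard x86-64 physical registers from the reg package:

// memExamples is a sketch of valid and invalid memory operands.
func memExamples() []operand.Mem {
	return []operand.Mem{
		{Base: reg.RAX, Index: reg.RCX, Scale: 8, Disp: 16}, // valid
		{Index: reg.RCX, Scale: 8},                          // invalid: missing base register
		{Base: reg.RAX, Index: reg.RCX},                     // invalid: index with scale 0
	}
}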